diff --git a/README.md b/README.md index 18a7fcba..4a1f0dc0 100644 --- a/README.md +++ b/README.md @@ -3,14 +3,13 @@ Installazione: https://kodiondemand.github.io/#download -KoD attualmente funziona con Kodi fino alla versione 18 (Python 2). - - KoD, come Alfa, è sotto licenza GPL v3, pertanto siete liberi di utilizzare parte del codice, a patto di rispettare i termini di suddetta licenza, che si possono riassumere in: - Il tuo addon deve essere rilasciando secondo la stessa licenza, ovvero essere open source (il fatto che lo zip sia visibile da chiunque non ha importanza, è necessario avere un repository git come questo) - Aggiungere i crediti a tutto ciò che copiate/modificate, ad esempio aggiungendo un commento nel file in questione o, meglio, facendo un cherry-pick (in modo da preservarnee lo storico) +Nota: KoD attualmente funziona con Kodi fino alla versione 18 (Python 2). ### Come contribuire o fare segnalazioni? Ti piace il progetto e vuoi dare una mano? Leggi [qui](https://github.com/kodiondemand/addon/blob/master/CONTRIBUTING.md) + diff --git a/addon.xml b/addon.xml index a23986c1..c104c5fb 100644 --- a/addon.xml +++ b/addon.xml @@ -1,11 +1,11 @@ - + - + - + video @@ -18,7 +18,10 @@ resources/media/themes/ss/2.png resources/media/themes/ss/3.png - - fix vari ed eventuali + - completato il supporto ai torrent e aggiunto ilcorsaronero.xyz +- aggiunto supporto agli episodi locali, ovvero poter inserire nella libreria di kodi un misto tra puntate "di kod" e file scaricati altrove +- le viste ora si salvano di nuovo dal menu laterale, ma rimangono salvate per il tipo di contenuto visualizzato e non per il singolo menu +- ripensato il menu rapido, che ora è "più rapido", ridisegnate alcune finestre Naviga velocemente sul web e guarda i contenuti presenti [COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR] [COLOR yellow]Kodi 
© is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR] @@ -28,6 +31,6 @@ https://t.me/kodiondemand https://github.com/kodiondemand/addon - + \ No newline at end of file diff --git a/channels.json b/channels.json index 1cc56f0a..18feb7ed 100644 --- a/channels.json +++ b/channels.json @@ -1,46 +1,44 @@ { "altadefinizione01": "https://altadefinizione01.photo", - "altadefinizione01_link": "https://altadefinizione01.kim", - "altadefinizioneclick": "https://altadefinizione.rocks", + "altadefinizione01_link": "https://altadefinizione01.gallery", + "altadefinizioneclick": "https://altadefinizione.family", "animeforce": "https://ww1.animeforce.org", - "animeleggendari": "https://animepertutti.com", + "animeleggendari": "https://animepertutti.org", "animesaturn": "https://www.animesaturn.com", "animestream": "https://www.animeworld.it", "animesubita": "http://www.animesubita.org", "animetubeita": "http://www.animetubeita.com", "animeunity": "https://www.animeunity.it", "animeworld": "https://www.animeworld.tv", - "casacinema": "https://www.casacinema.me", + "casacinema": "https://www.casacinema.digital", "casacinemaInfo": "https://casacinema.life", - "cineblog01": "https://cb01.uno", - "cb01anime": "https://www.cineblog01.network", - "cinemalibero": "https://www.cinemalibero.plus", + "cb01anime": "https://www.cineblog01.network/", + "cinemalibero": "https://cinemalibero.plus", "cinetecadibologna": "http://cinestore.cinetecadibologna.it", "dreamsub": "https://dreamsub.stream", "dsda": "https://www.dsda.press/", "fastsubita": "https://fastsubita.online", "filmgratis": "https://www.filmaltadefinizione.tv", "filmigratis": "https://filmigratis.org", - "filmsenzalimiticc": "https://www.filmsenzalimiti.cafe", + 
"filmsenzalimiticc": "https://www.filmsenzalimiti.estate", "filmstreaming01": "https://filmstreaming01.com", "guardaserie_stream": "https://guardaserie.store", - "guardaserieclick": "https://www.guardaserie.productions", + "guardaserieclick": "https://www.guardaserie.style", "hd4me": "https://hd4me.net", - "ilcorsaronero": "https://ilcorsaronero.pizza", - "ilgeniodellostreaming": "https://ilgeniodellostreaming.pl", + "ilgeniodellostreaming": "https://ilgeniodellostreaming.tw", "italiaserie": "https://italiaserie.org", "mondoserietv": "https://mondoserietv.com", "netfreex": "https://www.netfreex.club", - "piratestreaming": "https://www.piratestreaming.online", + "piratestreaming": "https://www.piratestreaming.biz", "polpotv": "https://polpotv.club", "pufimovies": "https://pufimovies.com", "raiplay": "https://www.raiplay.it", - "seriehd": "https://seriehd.digital", + "seriehd": "https://seriehd.net", "serietvonline": "https://serietvonline.host", "serietvsubita": "http://serietvsubita.xyz", "serietvu": "https://www.serietvu.link", "streamtime": "https://t.me/s/StreamTime", - "tantifilm": "https://www.tantifilm.vip", + "tantifilm": "https://www.tantifilm.pizza", "toonitalia": "https://toonitalia.org", "vedohd": "https://vedohd.uno", "vvvvid": "https://www.vvvvid.it" diff --git a/channels/0example.json.txt b/channels/0example.json.txt index 783193c6..1551727f 100644 --- a/channels/0example.json.txt +++ b/channels/0example.json.txt @@ -21,10 +21,9 @@ se vanno cancellati tutti deve rimanere la voce: "name": "Nome del canale visualizzato in KOD", "language": ["ita", "sub-ita"], "active": false, - "adult": false, "thumbnail": "", "banner": "", - "categories": ["movie", "tvshow", "anime", "vos", "documentary", "adult"], + "categories": ["movie", "tvshow", "anime", "vos", "documentary"], "not_active": ["include_in_newest"], "settings": [ { diff --git a/channels/altadefinizione01.json b/channels/altadefinizione01.json index f7589d50..3fc2ef26 100644 --- 
a/channels/altadefinizione01.json +++ b/channels/altadefinizione01.json @@ -3,7 +3,6 @@ "name": "Altadefinizione01", "language": ["ita", "sub-ita"], "active": true, - "adult": false, "thumbnail": "altadefinizione01.png", "banner": "altadefinizione01.png", "categories": ["movie", "vos"], diff --git a/channels/altadefinizione01_link.json b/channels/altadefinizione01_link.json index fe102b43..56a48c4f 100644 --- a/channels/altadefinizione01_link.json +++ b/channels/altadefinizione01_link.json @@ -2,7 +2,6 @@ "id": "altadefinizione01_link", "name": "Altadefinizione01 L", "active": true, - "adult": false, "language": ["ita","sub-ita"], "thumbnail": "altadefinizione01_L.png", "banner": "altadefinizione01_L.png", diff --git a/channels/altadefinizioneclick.json b/channels/altadefinizioneclick.json index 8ca67198..ec82baa7 100644 --- a/channels/altadefinizioneclick.json +++ b/channels/altadefinizioneclick.json @@ -2,7 +2,6 @@ "id": "altadefinizioneclick", "name": "AltadefinizioneClick", "active": true, - "adult": false, "language": ["ita","sub-ita"], "thumbnail": "altadefinizioneclick.png", "bannermenu": "altadefinizioneciclk.png", diff --git a/channels/animeforce.json b/channels/animeforce.json index 386a6e61..95578357 100644 --- a/channels/animeforce.json +++ b/channels/animeforce.json @@ -3,7 +3,6 @@ "name": "AnimeForce", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "animeforce.png", "banner": "animeforce.png", "categories": ["anime"], diff --git a/channels/animeleggendari.json b/channels/animeleggendari.json index 5fd0cf6b..74c686a5 100644 --- a/channels/animeleggendari.json +++ b/channels/animeleggendari.json @@ -2,7 +2,6 @@ "id": "animeleggendari", "name": "AnimePerTutti", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "animepertutti.png", "bannermenu": "animepertutti.png", diff --git a/channels/animesaturn.json b/channels/animesaturn.json index e409d3c4..a3cdd28c 100644 --- a/channels/animesaturn.json +++ 
b/channels/animesaturn.json @@ -2,7 +2,6 @@ "id": "animesaturn", "name": "AnimeSaturn", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "animesaturn.png", "banner": "animesaturn.png", diff --git a/channels/animespace.json b/channels/animespace.json index 26fafae6..54a542ae 100644 --- a/channels/animespace.json +++ b/channels/animespace.json @@ -2,7 +2,6 @@ "id": "animespace", "name": "AnimeSpace", "active": false, - "adult": false, "language": [], "thumbnail": "", "banner": "", diff --git a/channels/animesubita.json b/channels/animesubita.json index b1d695e1..b612eb16 100644 --- a/channels/animesubita.json +++ b/channels/animesubita.json @@ -2,7 +2,6 @@ "id": "animesubita", "name": "AnimeSubIta", "active": true, - "adult": false, "language": ["sub-ita"], "thumbnail": "animesubita.png", "bannermenu": "animesubita.png", diff --git a/channels/animetubeita.json b/channels/animetubeita.json index afb108c2..1565e785 100644 --- a/channels/animetubeita.json +++ b/channels/animetubeita.json @@ -2,7 +2,6 @@ "id": "animetubeita", "name": "AnimeTubeITA", "active": true, - "adult": false, "language": ["sub-ita"], "thumbnail": "animetubeita.png", "bannermenu": "animetubeita.png", diff --git a/channels/animeunity.json b/channels/animeunity.json index 99dc0fe0..22e0be35 100644 --- a/channels/animeunity.json +++ b/channels/animeunity.json @@ -2,7 +2,6 @@ "id": "animeunity", "name": "AnimeUnity", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "animeunity.png", "banner": "animeunity.png", diff --git a/channels/animeworld.json b/channels/animeworld.json index 0aa9aa1d..964b9d3d 100644 --- a/channels/animeworld.json +++ b/channels/animeworld.json @@ -2,7 +2,6 @@ "id": "animeworld", "name": "AnimeWorld", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "animeworld.png", "banner": "animeworld.png", diff --git a/channels/casacinema.json b/channels/casacinema.json index 4532df5d..b7d283bd 100644 --- 
a/channels/casacinema.json +++ b/channels/casacinema.json @@ -3,7 +3,6 @@ "name": "Casacinema", "language": ["ita", "sub-ita"], "active": true, - "adult": false, "thumbnail": "casacinema.png", "banner": "casacinema.png", "categories": ["tvshow", "movie","vos"], diff --git a/channels/casacinemaInfo.json b/channels/casacinemaInfo.json index 0bb89bb4..f3953133 100644 --- a/channels/casacinemaInfo.json +++ b/channels/casacinemaInfo.json @@ -3,7 +3,6 @@ "name": "La Casa del Cinema", "language": ["ita", "sub-ita"], "active": true, - "adult": false, "thumbnail": "casacinemainfo.png", "banner": "casacinemainfo.png", "categories": ["movie", "vos"], diff --git a/channels/cb01anime.json b/channels/cb01anime.json index 24fd0c8d..621c7c8a 100644 --- a/channels/cb01anime.json +++ b/channels/cb01anime.json @@ -3,7 +3,6 @@ "name": "Cb01anime", "language": ["ita", "vos", "sub-ita"], "active": true, - "adult": false, "thumbnail": "cb01anime.png", "banner": "cb01anime.png", "categories": ["anime"], diff --git a/channels/cineblog01.json b/channels/cineblog01.json index 82faafe3..c97e7d66 100644 --- a/channels/cineblog01.json +++ b/channels/cineblog01.json @@ -3,7 +3,6 @@ "name": "CB01", "language": ["ita", "sub-ita"], "active": true, - "adult": false, "thumbnail": "cb01.png", "banner": "cb01.png", "categories": ["tvshow", "movie", "vos", "documentary"], diff --git a/channels/cineblog01.py b/channels/cineblog01.py index fdc50de4..97d99488 100644 --- a/channels/cineblog01.py +++ b/channels/cineblog01.py @@ -137,32 +137,47 @@ def peliculas(item): @support.scrape def episodios(item): - patronBlock = r'(?P
\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P[^-<]+)(?:- (?P[^-<]+))?.*?[^<>]*?<\/div>.*?)
\[riduci\]<\/div>' - patron = r'(?:/>|

|)(?P.*?(?P[0-9]+(?:×|×)[0-9]+)\s*(?P.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|\s+<', '> <', data) + if 'TUTTA LA ' in data: + folderUrl = scrapertools.find_single_match(data, 'TUTTA LA \w+\s+(?:–|-)\s+]+>(?P[^<]+)' + sceneTitle = True + def itemHook(item): + item.serieFolder = True + return item + else: + patronBlock = r'(?P<block><div class="sp-head[a-z ]*?" title="Espandi">\s*(?:STAGION[EI]\s*(?:DA\s*[0-9]+\s*A)?\s*[0-9]+|MINISERIE) - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?[^<>]*?<\/div>.*?)<div class="spdiv">\[riduci\]<\/div>' + patron = r'(?:/>|<p>|<strong>)(?P<url>.*?(?P<episode>[0-9]+(?:×|×)[0-9]+)\s*(?P<title2>.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|<br)' + def itemlistHook(itemlist): + title_dict = {} + itlist = [] + for item in itemlist: + item.title = re.sub(r'\.(\D)',' \\1', item.title) + match = support.match(item.title, patron=r'(\d+.\d+)').match.replace('x','') + item.order = match + if match not in title_dict: + title_dict[match] = item + elif match in title_dict and item.contentLanguage == title_dict[match].contentLanguage \ + or item.contentLanguage == 'ITA' and not title_dict[match].contentLanguage \ + or title_dict[match].contentLanguage == 'ITA' and not item.contentLanguage: + title_dict[match].url = item.url + else: + title_dict[match + '1'] = item - for key, value in title_dict.items(): - itlist.append(value) + for key, value in title_dict.items(): + itlist.append(value) - return sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order))) + return sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order))) return locals() def findvideos(item): + if item.serieFolder: + return support.server(item, data=item.url) if item.contentType == "episode": return findvid_serie(item) @@ -205,7 +220,7 @@ def findvideos(item): itemlist = support.server(item, itemlist=itemlist) # Extract the quality format - patronvideos = '>([^<]+)</strong></div>' + patronvideos = '([\w.]+)</strong></div></td>' support.addQualityTag(item, itemlist, data, 
patronvideos) return itemlist diff --git a/channels/cinemalibero.json b/channels/cinemalibero.json index 72ca5d06..c64c78a3 100644 --- a/channels/cinemalibero.json +++ b/channels/cinemalibero.json @@ -3,7 +3,6 @@ "name": "Cinemalibero", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "cinemalibero.png", "banner": "cinemalibero.png", "categories": ["movie","tvshow"], diff --git a/channels/cinetecadibologna.json b/channels/cinetecadibologna.json index d398eb63..fd556a17 100644 --- a/channels/cinetecadibologna.json +++ b/channels/cinetecadibologna.json @@ -3,7 +3,6 @@ "name": "Cineteca di Bologna", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "cinetecadibologna.png", "banner": "cinetecadibologna.png", "categories": ["documentary"], diff --git a/channels/dreamsub.json b/channels/dreamsub.json index d0d39ea9..578ab806 100644 --- a/channels/dreamsub.json +++ b/channels/dreamsub.json @@ -3,7 +3,6 @@ "name": "DreamSub", "language": ["ita", "sub-ita"], "active": true, - "adult": false, "thumbnail": "dreamsub.png", "banner": "dreamsub.png", "categories": ["anime", "vos"], diff --git a/channels/dsda.json b/channels/dsda.json index f6d4adf6..4ec18a80 100644 --- a/channels/dsda.json +++ b/channels/dsda.json @@ -3,7 +3,6 @@ "name": "D.S.D.A", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "dsda.png", "banner": "dsda.png", "categories": ["documentary"], diff --git a/channels/eurostreaming.json b/channels/eurostreaming.json index 88d7d021..79a3eb99 100644 --- a/channels/eurostreaming.json +++ b/channels/eurostreaming.json @@ -2,7 +2,6 @@ "id": "eurostreaming", "name": "Eurostreaming", "active": true, - "adult": false, "language": ["ita","sub-ita"], "thumbnail": "eurostreaming.png", "banner": "eurostreaming.png", diff --git a/channels/fastsubita.json b/channels/fastsubita.json index 3feb7834..c77eaa54 100644 --- a/channels/fastsubita.json +++ b/channels/fastsubita.json @@ -3,7 +3,6 @@ "name": "Fastsubita", "language": 
["sub-ita"], "active": true, - "adult": false, "thumbnail": "fastsubita.png", "banner": "fastsubita.png", "categories": ["tvshow", "vos"], diff --git a/channels/filmigratis.json b/channels/filmigratis.json index 46290f5b..6a970b15 100644 --- a/channels/filmigratis.json +++ b/channels/filmigratis.json @@ -2,7 +2,6 @@ "id": "filmigratis", "name": "Filmi Gratis", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "filmigratis.png", "banner": "filmigratis.png", diff --git a/channels/filmpertutti.json b/channels/filmpertutti.json index 65179cbe..0130b04a 100644 --- a/channels/filmpertutti.json +++ b/channels/filmpertutti.json @@ -2,7 +2,6 @@ "id": "filmpertutti", "name": "Filmpertutti", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "filmpertutti.png", "banner": "filmpertutti.png", diff --git a/channels/filmsenzalimiticc.json b/channels/filmsenzalimiticc.json index 81f21615..04636231 100644 --- a/channels/filmsenzalimiticc.json +++ b/channels/filmsenzalimiticc.json @@ -2,7 +2,6 @@ "id": "filmsenzalimiticc", "name": "Filmsenzalimiti CC", "active": false, - "adult": false, "language": ["ita"], "thumbnail": "filmsenzalimiticc.png", "banner": "", diff --git a/channels/guardaserieclick.json b/channels/guardaserieclick.json index 2593ae40..056097f1 100644 --- a/channels/guardaserieclick.json +++ b/channels/guardaserieclick.json @@ -2,7 +2,6 @@ "id": "guardaserieclick", "name": "GuardaSerie.click", "active": true, - "adult": false, "language": ["ita", "vos"], "thumbnail": "guardaserieclick.png", "bannermenu": "guardaserieclick.png", diff --git a/channels/hd4me.json b/channels/hd4me.json index 04465df9..e85964a0 100644 --- a/channels/hd4me.json +++ b/channels/hd4me.json @@ -3,7 +3,6 @@ "name": "HD4ME", "language": ["ita", "sub-ita"], "active": true, - "adult": false, "thumbnail": "hd4me.png", "banner": "hd4me.png", "categories": ["movie", "vos"], diff --git a/channels/ilcorsaronero.json b/channels/ilcorsaronero.json 
index ab5be5ed..129bdc68 100644 --- a/channels/ilcorsaronero.json +++ b/channels/ilcorsaronero.json @@ -1,8 +1,7 @@ { "id": "ilcorsaronero", "name": "ilCorSaRoNeRo", - "active": false, - "adult": false, + "active": true, "language": ["ita"], "thumbnail": "ilcorsaronero.png", "banner": "ilcorsaronero.png", diff --git a/channels/ilcorsaronero.py b/channels/ilcorsaronero.py index ac5a47d7..39941c7d 100644 --- a/channels/ilcorsaronero.py +++ b/channels/ilcorsaronero.py @@ -5,7 +5,13 @@ from core import support -host = support.config.get_channel_url() +def findhost(): + data = support.httptools.downloadpage('https://lagazzettadelcorsaro.com/').data + url = support.scrapertools.find_single_match(data, '<li><a href="([^"]+)') + return url[:-1] if url.endswith('/') else url + +host = support.config.get_channel_url(findhost) +support.log('HOST',host) # host = 'https://ilcorsaronero.xyz' headers = [['Referer', host]] @@ -16,23 +22,24 @@ list_quality = ['default'] def mainlist(item): menu = [ - ('BDRiP {film}', ['/categoria.php?active=0&category=1&order=data&by=DESC&page=', 'peliculas', [0, 'movie']]), - ('Cerca BDRiP... submenu {film}', ['/categoria.php?active=0&category=1&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('DVD {film}', ['/categoria.php?active=0&category=20&order=data&by=DESC&page=', 'peliculas', [0, 'movie']]), - ('Cerca DVD... submenu {film}', ['/categoria.php?active=0&category=20&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Screener {film}', ['/categoria.php?active=0&category=19&order=data&by=DESC&page=', 'peliculas', [0, 'movie']]), - ('Cerca Screener.. submenu {film}', ['/categoria.php?active=0&category=19&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Serie TV', ['/categoria.php?active=0&category=15&order=data&by=DES&page=', 'peliculas', [0 , 'tvshow']]), - ('Cerca Serie TV.. 
submenu', ['/categoria.php?active=0&category=15&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Anime', ['/categoria.php?active=0&category=5&order=data&by=DESC&page=', 'peliculas', [0, 'anime']]), - ('Cerca Anime.. submenu', ['/categoria.php?active=0&category=5&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Musica', ['/categoria.php?active=0&category=2&order=data&by=DESC&page=', 'peliculas', [0, 'music']]), - ('Cerca Musica.. submenu', ['/categoria.php?active=0&category=2&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Audiolibri {musica}', ['/categoria.php?active=0&category=18&order=data&by=DESC&page=', 'peliculas', [0, 'music']]), - ('Cerca Audiolibri.. submenu', ['/categoria.php?active=0&category=18&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Altro {film}', ['/categoria.php?active=0&category=4&order=data&by=DESC&page=', 'peliculas', [0, 'movie']]), - ('Cerca altro.. submenu', ['/categoria.php?active=0&category=4&order=data&by=DESC&argh.php?search=', 'search', 'search']), - ('Cerca Tutto... color kod bold', ['/argh.php?search=', 'search', 'search']) + ('BDRiP {film}', ['/categoria.php?active=0&category=1&order=data&by=DESC&page=', 'peliculas', [0, 'movie', True]]), + ('Cerca BDRiP... submenu {film}', ['/torrent-ita/1/', 'search', ['search', 'movie', True]]), + ('DVD {film}', ['/categoria.php?active=0&category=20&order=data&by=DESC&page=', 'peliculas', [0, 'movie', True]]), + ('Cerca DVD... submenu {film}', ['/torrent-ita/20/', 'search', ['search', 'movie', True]]), + ('Screener {film}', ['/categoria.php?active=0&category=19&order=data&by=DESC&page=', 'peliculas', [0, 'movie', True]]), + ('Cerca Screener.. submenu {film}', ['/torrent-ita/19/', 'search', ['search', 'movie', True]]), + ('Serie TV', ['/categoria.php?active=0&category=15&order=data&by=DES&page=', 'peliculas', [0 , 'tvshow', True]]), + ('Cerca Serie TV.. 
submenu', ['/torrent-ita/15/', 'search', ['search', 'tvshow',True]]), + ('Anime', ['/categoria.php?active=0&category=5&order=data&by=DESC&page=', 'peliculas', [0, 'anime', True]]), + ('Cerca Anime.. submenu', ['/torrent-ita/5/', 'search', ['search', 'anime', True]]), + ('Musica', ['/categoria.php?active=0&category=2&order=data&by=DESC&page=', 'peliculas', [0, 'music', False]]), + ('Cerca Musica.. submenu', ['/torrent-ita/2/', 'search', ['search', 'music', False]]), + ('Audiolibri {musica}', ['/categoria.php?active=0&category=18&order=data&by=DESC&page=', 'peliculas', [0, 'music', False]]), + ('Cerca Audiolibri.. submenu', ['/torrent-ita/18/', 'search', ['search', 'music', False]]), + # mostrerebbe anche risultati non "multimediali" e allungherebbero inutilmente la ricerca globale + # ('Altro {film}', ['/categoria.php?active=0&category=4&order=data&by=DESC&page=', 'peliculas', [0, 'other', False]]), + # ('Cerca altro.. submenu', ['/torrent-ita/4/', 'search', ['search', 'other', False]]), + # ('Cerca Tutto... 
color kod bold', ['/argh.php?search=', 'search', ['search', 'all', False]]) ] return locals() @@ -40,23 +47,28 @@ def mainlist(item): @support.scrape def peliculas(item): - patron = r'>(?P<quality>[^"<]+)</td> <TD[^>]+><A class="tab" HREF="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<size>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<seed>[^<]+)' + sceneTitle = item.args[2] + if item.args[1] in ['tvshow', 'anime', 'music', 'other']: + patron = r'>[^"<]+' + else: + patron = r'>(?P<quality>[^"<]+)' + patron += '</td> <TD[^>]+><A class="tab" HREF="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<size>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<seed>[^<]+)' def itemHook(item): - item.title = item.title.replace('.',' ') - thumb = (item.args[1] if type(item.args) == list else item.args) + '.png' - item.thumbnail = support.thumb(thumb=thumb) + item.contentType = item.args[1] + return item if 'search' not in item.args: - support.log('OK') item.url += str(item.args[0]) def itemlistHook(itemlist): + args = item.args + args[0] += 1 itemlist.append( support.Item(channel=item.channel, action = item.action, contentType=item.contentType, title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), url=item.url, - args=item.args[0] + 1, + args=args, thumbnail=support.thumb())) return itemlist return locals() @@ -64,9 +76,11 @@ def peliculas(item): def search(item, text): support.log(item, text) - itemlist = [] - item.url += text + if 'all' in item.args: + item.url += text + else: + item.url += text + '.html' try: return peliculas(item) # Cattura la eccezione così non interrompe la ricerca globle se il canale si rompe! 
@@ -78,5 +92,6 @@ def search(item, text): def findvideos(item): + if item.contentType == 'tvshow': item.contentType = 'episode' video_library = True if 'movie' in item.args else False - return support.server(item, support.match(item.url, patron=r'"(magnet[^"]+)').match,down_load=False, video_library=video_library) + return support.server(item, support.match(item.url, patron=r'"(magnet[^"]+)').match, video_library=video_library) diff --git a/channels/ilgeniodellostreaming.json b/channels/ilgeniodellostreaming.json index 960aaabe..61bc398b 100644 --- a/channels/ilgeniodellostreaming.json +++ b/channels/ilgeniodellostreaming.json @@ -2,7 +2,6 @@ "id": "ilgeniodellostreaming", "name": "IlGenioDelloStreaming", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "ilgeniodellostreaming.png", "banner": "ilgeniodellostreaming.png", diff --git a/channels/italiaserie.json b/channels/italiaserie.json index a8d184b6..ad1b8c42 100644 --- a/channels/italiaserie.json +++ b/channels/italiaserie.json @@ -2,7 +2,6 @@ "id": "italiaserie", "name": "Italia Serie", "active": true, - "adult": false, "language": ["ita","sub-ita"], "thumbnail": "italiaserie.png", "bannermenu": "italiaserie.png", diff --git a/channels/metalvideo.json b/channels/metalvideo.json index e18004c7..bb2e792a 100644 --- a/channels/metalvideo.json +++ b/channels/metalvideo.json @@ -2,7 +2,6 @@ "id": "metalvideo", "name": "Metal Video", "active": true, - "adult": false, "language": ["*"], "thumbnail": "metalvideo.png", "banner": "metalvideo.png", diff --git a/channels/metalvideo.py b/channels/metalvideo.py index 95ccad47..823bc40e 100644 --- a/channels/metalvideo.py +++ b/channels/metalvideo.py @@ -25,6 +25,7 @@ def mainlist(item): patron = r'<a href="(?P<url>[^"]+)"(?: class="")?>(?P<title>[^<]+)<' def itemHook(item): item.thumbnail = support.thumb(thumb='music.png') + item.contentType = 'music' return item def itemlistHook(itemlist): itemlist.pop(0) @@ -32,7 +33,8 @@ def mainlist(item): 
support.Item( channel=item.channel, title=support.typo('Cerca...', 'bold color kod'), - url = item.url, + contentType='music', + url=item.url, action='search', thumbnail=support.thumb(thumb='search.png'))) return itemlist @@ -43,6 +45,7 @@ def peliculas(item): action = 'findvideos' patron= r'<img src="[^"]+" alt="(?P<title>[^"]+)" data-echo="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="(?P<url>[^"]+)"' patronNext = r'<a href="([^"]+)">»' + typeContentDict = {'': 'music'} return locals() diff --git a/channels/mondoserietv.json b/channels/mondoserietv.json index 3bd6ab63..9a7b1e0b 100644 --- a/channels/mondoserietv.json +++ b/channels/mondoserietv.json @@ -2,7 +2,6 @@ "id": "mondoserietv", "name": "MondoSerieTV", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "mondoserietv.png", "bannermenu": "mondoserietv.png", diff --git a/channels/netfreex.json b/channels/netfreex.json index 775f3677..6c69f917 100644 --- a/channels/netfreex.json +++ b/channels/netfreex.json @@ -3,7 +3,6 @@ "name": "Netfreex", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "netfreex.png", "banner": "netfreex.png", "categories": ["tvshow", "movie", "anime"], diff --git a/channels/piratestreaming.json b/channels/piratestreaming.json index b52b1c73..1f7ad819 100644 --- a/channels/piratestreaming.json +++ b/channels/piratestreaming.json @@ -2,7 +2,6 @@ "id": "piratestreaming", "name": "Pirate Streaming", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "piratestreaming.png", "bannermenu": "piratestreaming.png", diff --git a/channels/polpotv.json b/channels/polpotv.json index 26c0610a..d3c8d09a 100644 --- a/channels/polpotv.json +++ b/channels/polpotv.json @@ -3,7 +3,6 @@ "name": "PolpoTV", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "polpotv.png", "banner": "polpotv.png", "categories": ["movie","tvshow"], diff --git a/channels/porn/LIKUOO.json b/channels/porn/LIKUOO.json deleted file mode 100644 
index 593db20f..00000000 --- a/channels/porn/LIKUOO.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "LIKUOO", - "name": "LIKUOO", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://likuoo.video/files_static/images/logo.jpg", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/LIKUOO.py b/channels/porn/LIKUOO.py deleted file mode 100644 index a0038dc6..00000000 --- a/channels/porn/LIKUOO.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.likuoo.video' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Pornstar" , action="categorias", url=host + "/pornstars/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/all-channels/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedplot = "" - scrapedthumbnail = "https:" + scrapedthumbnail - scrapedurl = 
urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="item">.*?' - patron += '<a href="([^"]+)" title="(.*?)">.*?' - patron += 'src="(.*?)".*?' - patron += '<div class="runtime">(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - scrapedtime = scrapedtime.replace("m", ":").replace("s", " ") - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " +scrapedtitle - contentTitle = title - thumbnail = "https:" + scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = 'url:\'([^\']+)\'.*?' 
- patron += 'data:\'([^\']+)\'' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl,post in matches: - post = post.replace("%3D", "=") - scrapedurl = host + scrapedurl - logger.debug( item.url +" , "+ scrapedurl +" , " +post ) - datas = httptools.downloadpage(scrapedurl, post=post, headers={'Referer':item.url}).data - datas = datas.replace("\\", "") - url = scrapertools.find_single_match(datas, '<iframe src="([^"]+)"') - itemlist.append( Item(channel=item.channel, action="play", title = "%s", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/TXXX.json b/channels/porn/TXXX.json deleted file mode 100644 index ee4be410..00000000 --- a/channels/porn/TXXX.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "TXXX", - "name": "TXXX", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.txxx.com/images/desktop-logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/TXXX.py b/channels/porn/TXXX.py deleted file mode 100644 index 32a360b2..00000000 --- a/channels/porn/TXXX.py +++ /dev/null @@ -1,149 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://www.txxx.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/most-popular/")) - itemlist.append( 
Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="channel-thumb">.*?' - patron += '<a href="([^"]+)" title="([^"]+)".*?' - patron += '<img src="([^"]+)".*?' - patron += '<span>(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches: - scrapedplot = "" - scrapedurl = host + scrapedurl - title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]" - itemlist.append( Item(channel=item.channel, action="lista", title=title , url=scrapedurl , - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , - text_color="blue", url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a class="categories-list__link" href="([^"]+)">.*?' - patron += '<span class="categories-list__name cat-icon" data-title="([^"]+)">.*?' 
- patron += '<span class="categories-list__badge">(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,num in matches: - url = urlparse.urljoin(item.url,scrapedurl) - scrapedthumbnail = "" - scrapedplot = "" - title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]" - itemlist.append( Item(channel=item.channel, action="lista", title=title , url=url , - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?' - patron += '<img src="([^"]+)" alt="([^"]+)".*?' - patron += '</div>(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - contentTitle = scrapedtitle - scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="thumb__hd">(.*?)</span>') - duration = scrapertools.find_single_match(scrapedtime, '<span class="thumb__duration">(.*?)</span>') - if scrapedhd != '': - title = "[COLOR yellow]" +duration+ "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle - else: - title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, - plot=plot, contentTitle=title) ) - next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next.*?" 
href="([^"]+)" title="Next Page"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"') - video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"') - partes = video_url.split('||') - video_url = decode_url(partes[0]) - video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url) - video_url += '&' if '?' in video_url else '?' - video_url += 'lip=' + partes[2] + '<=' + partes[3] - itemlist.append(item.clone(action="play", title=item.title, url=video_url)) - return itemlist - - -def decode_url(txt): - _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' - reto = ''; n = 0 - # En las dos siguientes líneas, ABCEM ocupan 2 bytes cada letra! El replace lo deja en 1 byte. !!!!: АВСЕМ (10 bytes) ABCEM (5 bytes) - txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt) - txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M') - - while n < len(txt): - a = _0x52f6x15.index(txt[n]) - n += 1 - b = _0x52f6x15.index(txt[n]) - n += 1 - c = _0x52f6x15.index(txt[n]) - n += 1 - d = _0x52f6x15.index(txt[n]) - n += 1 - - a = a << 2 | b >> 4 - b = (b & 15) << 4 | c >> 2 - e = (c & 3) << 6 | d - reto += chr(a) - if c != 64: reto += chr(b) - if d != 64: reto += chr(e) - - return urllib.unquote(reto) - diff --git a/channels/porn/__init__.py b/channels/porn/__init__.py deleted file mode 100644 index 0b95a268..00000000 --- a/channels/porn/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import sys - -# Appends the main plugin dir to the PYTHONPATH if an internal package cannot be imported. 
-# Examples: In Plex Media Server all modules are under "Code.*" package, and in Enigma2 under "Plugins.Extensions.*" -try: - # from core import logger - import core -except: - sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) diff --git a/channels/porn/absoluporn.json b/channels/porn/absoluporn.json deleted file mode 100644 index b3234d03..00000000 --- a/channels/porn/absoluporn.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "absoluporn", - "name": "absoluporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.absoluporn.es/image/deco/logo.gif", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/absoluporn.py b/channels/porn/absoluporn.py deleted file mode 100644 index 32abc69c..00000000 --- a/channels/porn/absoluporn.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.absoluporn.es' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/wall-date-1.html")) - itemlist.append( Item(channel=item.channel, title="Mas valorados" , action="lista", url=host + "/wall-note-1.html")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/wall-main-1.html")) - itemlist.append( Item(channel=item.channel, title="Mas largos" , action="lista", url=host + "/wall-time-1.html")) - - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = 
texto.replace(" ", "+") - item.url = host + "/search-%s-1.html" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = ' <a href="([^"]+)" class="link1">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = scrapedurl.replace(".html", "_date.html") - scrapedurl = host +"/" + scrapedurl - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<div class="thumb-main-titre"><a href="([^"]+)".*?' - patron += 'title="([^"]+)".*?' - patron += 'src="([^"]+)".*?' - patron += '<div class="time">(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, - fanart=thumbnail, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = 'servervideo = \'([^\']+)\'.*?' 
- patron += 'path = \'([^\']+)\'.*?' - patron += 'filee = \'([^\']+)\'.*?' - matches = scrapertools.find_multiple_matches(data, patron) - for servervideo,path,filee in matches: - scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/alsoporn.json b/channels/porn/alsoporn.json deleted file mode 100644 index 25501c10..00000000 --- a/channels/porn/alsoporn.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "alsoporn", - "name": "alsoporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://alsoporn.com/images/alsoporn.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/alsoporn.py b/channels/porn/alsoporn.py deleted file mode 100644 index 5433405d..00000000 --- a/channels/porn/alsoporn.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools -import base64 - -host = 'http://www.alsoporn.com' - - -def mainlist(item): - logger.info() - itemlist = [] - # itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/en/g/All/new/1")) - itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/g/All/top/1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/=%s/" % 
texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)">.*?' - patron += '<img src="([^"]+)" alt="([^"]+)" />' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return sorted(itemlist, key=lambda i: i.title) - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="alsoporn_prev">.*?' - patron += '<a href="([^"]+)">.*?' - patron += '<img src="([^"]+)" alt="([^"]+)">.*?' 
- patron += '<span>([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - if not "0:00" in scrapedtime: - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = scrapedtitle)) - - next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'') - data = httptools.downloadpage(scrapedurl).data - scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"') - scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?url=", "") - scrapedurl1 = base64.b64decode(scrapedurl1 + "=") - logger.debug(scrapedurl1) - data = httptools.downloadpage(scrapedurl1).data - if "xvideos" in scrapedurl1: - scrapedurl2 = scrapertools.find_single_match(data, 'html5player.setVideoHLS\(\'([^\']+)\'\)') - if "xhamster" in scrapedurl1: - scrapedurl2 = scrapertools.find_single_match(data, '"[0-9]+p":"([^"]+)"').replace("\\", "") - - logger.debug(scrapedurl2) - itemlist.append(item.clone(action="play", title=item.title, url=scrapedurl2)) - return itemlist - diff --git a/channels/porn/analdin.json b/channels/porn/analdin.json deleted file mode 100644 index f465b719..00000000 --- a/channels/porn/analdin.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "analdin", - "name": 
"analdin", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.analdin.com/images/logo-retina.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/analdin.py b/channels/porn/analdin.py deleted file mode 100644 index 65cb6793..00000000 --- a/channels/porn/analdin.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - - -host = 'https://www.analdin.com/es' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/más-reciente/")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/más-visto/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/mejor-valorado/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>') - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">' - 
matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?' - patron += 'src="([^"]+)".*?' - patron += '<div class="videos">([^"]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return sorted(itemlist, key=lambda i: i.title) - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="popup-video-link" href="([^"]+)".*?' - patron += 'thumb="([^"]+)".*?' - patron += '<div class="duration">(.*?)</div>.*?' 
- patron += '<strong class="title">\s*([^"]+)</strong>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtime,scrapedtitle in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, - fanart=thumbnail, contentTitle = title)) - next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = 'video_url: \'([^\']+)\'' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl in matches: - url = scrapedurl - itemlist.append(item.clone(action="play", title=url, url=url)) - return itemlist - diff --git a/channels/porn/beeg.json b/channels/porn/beeg.json deleted file mode 100755 index 01d595cb..00000000 --- a/channels/porn/beeg.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "beeg", - "name": "Beeg", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "beeg.png", - "banner": "beeg.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/beeg.py b/channels/porn/beeg.py deleted file mode 100755 index 3ce4c8f9..00000000 --- a/channels/porn/beeg.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urllib - -from core import jsontools as json -from core import scrapertools -from core.item import Item -from platformcode import logger -from core import httptools - - -url_api = "" -Host = "https://beeg.com" - - -def get_api_url(): - global url_api - data = 
httptools.downloadpage(Host).data - version = re.compile('var beeg_version = ([\d]+)').findall(data)[0] - url_api = Host + "/api/v6/" + version - - -get_api_url() - - -def mainlist(item): - logger.info() - get_api_url() - itemlist = [] - itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=url_api + "/index/main/0/pc", - viewmode="movie")) - itemlist.append(Item(channel=item.channel, action="canal", title="Canal", - url=url_api + "/channels")) - itemlist.append(Item(channel=item.channel, action="listcategorias", title="Categorias", - url=url_api + "/index/main/0/pc", extra="nonpopular")) - itemlist.append( - Item(channel=item.channel, action="search", title="Buscar")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = url_api + "/index/tag/0/pc?tag=%s" % (texto) - - try: - return videos(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - -def videos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - JSONData = json.load(data) - for Video in JSONData["videos"]: - thumbnail = "http://img.beeg.com/236x177/" + str(Video["id"]) + ".jpg" - url= '%s/video/%s?v=2&s=%s&e=%s' % (url_api, Video['svid'], Video['start'], Video['end']) - title = Video["title"] - itemlist.append( - Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot="", show="", - folder=True, contentType="movie")) - # Paginador - Actual = int(scrapertools.find_single_match(item.url, url_api + '/index/[^/]+/([0-9]+)/pc')) - if JSONData["pages"] - 1 > Actual: - scrapedurl = item.url.replace("/" + str(Actual) + "/", "/" + str(Actual + 1) + "/") - itemlist.append( - Item(channel=item.channel, action="videos", title="Página Siguiente", url=scrapedurl, thumbnail="", - viewmode="movie")) - return itemlist - - -def 
listcategorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - JSONData = json.load(data) - for Tag in JSONData["tags"]: - url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"] - url = url.replace("%20", "-") - title = '%s (%s)' % (str(Tag["tag"]), str(Tag["videos"])) - itemlist.append( - Item(channel=item.channel, action="videos", title=title, url=url, viewmode="movie", type="item")) - return itemlist - - -def canal(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - JSONData = json.load(data) - for Tag in JSONData["channels"]: - url = url_api + "/index/channel/0/pc?channel=" + Tag["channel"] - url = url.replace("%20", "-") - title = '%s (%s)' % (str(Tag["ps_name"]), str(Tag["videos"])) - itemlist.append( - Item(channel=item.channel, action="videos", title=title, url=url, viewmode="movie", type="item")) - return itemlist - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - JSONData = json.load(data) - for key in JSONData: - videourl = re.compile("([0-9]+p)", re.DOTALL).findall(key) - if videourl: - videourl = videourl[0] - if not JSONData[videourl] == None: - url = JSONData[videourl] - url = url.replace("{DATA_MARKERS}", "data=pc.ES") - if not url.startswith("https:"): url = "https:" + url - title = videourl - itemlist.append(["%s %s [directo]" % (title, url[-4:]), url]) - itemlist.sort(key=lambda item: item[0]) - return itemlist - diff --git a/channels/porn/bravoporn.json b/channels/porn/bravoporn.json deleted file mode 100644 index 2ed81efb..00000000 --- a/channels/porn/bravoporn.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "bravoporn", - "name": "bravoporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.bravoporn.com/v/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/bravoporn.py b/channels/porn/bravoporn.py deleted file mode 100644 
index 740dd779..00000000 --- a/channels/porn/bravoporn.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ - -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.bravoporn.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host +"/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/c/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/s/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" class="th">.*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedthumbnail = "http:" + scrapedthumbnail - scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class=".*?video_block"><a href="([^"]+)".*?' - patron += '<img src="([^"]+)".*?alt="([^"]+)".*?' - patron += '<span class="time">([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - thumbnail = "https:" + scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl)) - return 
itemlist - diff --git a/channels/porn/camwhoresbay.json b/channels/porn/camwhoresbay.json deleted file mode 100644 index f57213ca..00000000 --- a/channels/porn/camwhoresbay.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "camwhoresbay", - "name": "camwhoresbay", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/camwhoresbay.py b/channels/porn/camwhoresbay.py deleted file mode 100644 index 85ba0b04..00000000 --- a/channels/porn/camwhoresbay.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.camwhoresbay.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - item.url = "%s/search/%s/" % (host, texto.replace("+", "-")) - item.extra = texto - try: - return lista(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - 
data = httptools.downloadpage(item.url).data - patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?' - patron += '<img class="thumb" src="([^"]+)".*?' - patron += '<div class="videos">([^"]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return sorted(itemlist, key=lambda i: i.title) - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="video-item ">.*?' - patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?' - patron += 'data-original="([^"]+)".*?' - patron += '<i class="fa fa-clock-o"></i>(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = "http:" + scrapedthumbnail + "|Referer=%s" % item.url - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, - contentTitle = scrapedtitle, fanart=thumbnail)) - if item.extra: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)') - if next_page: - if "from_videos=" in item.url: - next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url) - else: - next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \ - "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", 
text_color="blue", url=next_page)) - else: - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"') - if next_page and not next_page.startswith("#"): - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - else: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') - if next_page: - if "from" in item.url: - next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) - else: - next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % ( - item.url, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'') - - itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo")) - return itemlist - - diff --git a/channels/porn/canalporno.json b/channels/porn/canalporno.json deleted file mode 100644 index 9d6913b7..00000000 --- a/channels/porn/canalporno.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "canalporno", - "name": "Canalporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://i.imgur.com/gAbPcvT.png?1", - "banner": "canalporno.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git 
a/channels/porn/canalporno.py b/channels/porn/canalporno.py deleted file mode 100644 index 58051909..00000000 --- a/channels/porn/canalporno.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -import urlparse,re - -from core import httptools -from core import scrapertools -from platformcode import logger - -host = "http://www.canalporno.com" - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(item.clone(action="lista", title="Útimos videos", url=host + "/ajax/homepage/?page=1")) - itemlist.append(item.clone(action="categorias", title="Canal", url=host + "/ajax/list_producers/?page=1")) - itemlist.append(item.clone(action="categorias", title="PornStar", url=host + "/ajax/list_pornstars/?page=1")) - itemlist.append(item.clone(action="categorias", title="Categorias", - url=host + "/categorias")) - itemlist.append(item.clone(action="search", title="Buscar")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/ajax/new_search/?q=%s&page=1" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if "pornstars" in item.url: - patron = '<div class="muestra.*?href="([^"]+)".*?src=\'([^\']+)\'.*?alt="([^"]+)".*?' - else: - patron = '<div class="muestra.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)".*?' 
- if "Categorias" in item.title: - patron += '<div class="numero">([^<]+)</div>' - else: - patron += '</span> (\d+) vídeos</div>' - matches = scrapertools.find_multiple_matches(data, patron) - for url, scrapedthumbnail, scrapedtitle, cantidad in matches: - title= "%s [COLOR yellow] %s [/COLOR]" % (scrapedtitle, cantidad) - url= url.replace("/videos-porno/", "/ajax/show_category/").replace("/sitio/", "/ajax/show_producer/").replace("/pornstar/", "/ajax/show_pornstar/") - url = host + url + "?page=1" - itemlist.append(item.clone(action="lista", title=title, url=url, thumbnail=scrapedthumbnail)) - if "/?page=" in item.url: - next_page=item.url - num= int(scrapertools.find_single_match(item.url,".*?/?page=(\d+)")) - num += 1 - next_page = "?page=" + str(num) - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = 'data-src="([^"]+)" alt="([^"]+)".*?<h2><a href="([^"]+)">.*?' 
\ - '<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedthumbnail, scrapedtitle, scrapedurl, duration in matches: - title = "[COLOR yellow] %s [/COLOR] %s" % (duration, scrapedtitle) - url = host + scrapedurl - itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=scrapedthumbnail)) - last=scrapertools.find_single_match(item.url,'(.*?)page=\d+') - num= int(scrapertools.find_single_match(item.url,".*?/?page=(\d+)")) - num += 1 - next_page = "page=" + str(num) - if next_page!="": - next_page = last + next_page - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, '<source src="([^"]+)"') - itemlist.append(item.clone(url=url, server="directo")) - return itemlist diff --git a/channels/porn/cat3plus.json b/channels/porn/cat3plus.json deleted file mode 100644 index ada3335f..00000000 --- a/channels/porn/cat3plus.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "cat3plus", - "name": "Cat3plus", - "active": true, - "adult": true, - "language": [], - "thumbnail": "https://i.imgur.com/SJxXKa2.png", - "fanart": "https://i.imgur.com/ejCwTxT.jpg", - "banner": "https://i.imgur.com/bXUyk6m.png", - "categories": [ - "movie", - "vos" - ] -} diff --git a/channels/porn/cat3plus.py b/channels/porn/cat3plus.py deleted file mode 100644 index ea0a7234..00000000 --- a/channels/porn/cat3plus.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel SleazeMovies -*- -# -*- Created for Alfa-addon -*- -# -*- By Sculkurt -*- - - -import re -import urllib -import urlparse -from channelselector import get_thumb -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item 
import Item -from platformcode import config, logger - -host = 'http://www.cat3plus.com/' - -headers = [ - ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'], - ['Accept-Encoding', 'gzip, deflate'], - ['Referer', host] -] - -def mainlist(item): - logger.info() - - itemlist = list() - itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True))) - itemlist.append(item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True))) - itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True))) - - return itemlist - -def years(item): - logger.info() - itemlist = list() - data = httptools.downloadpage(item.url, cookies=False).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = "<a dir='ltr' href='([^']+)'>([^<]+)</a>" - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle in matches: - itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl)) - return itemlist - -def get_source(url): - logger.info() - data = httptools.downloadpage(url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - return data - - -def list_all(item): - logger.info() - itemlist = [] - data = get_source(item.url) - - patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?" - patron += 'src="([^"]+).*?' 
- matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle, year, img in matches: - itemlist.append(Item(channel = item.channel, - title = scrapedtitle, - url = scrapedurl, - action = "findvideos", - thumbnail = img, - contentTitle = scrapedtitle, - contentType = "movie", - infoLabels = {'year': year})) - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) - - # Extraer la marca de siguiente página - next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'") - if next_page != "": - itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True)) - - return itemlist - - - -def search(item, texto): - logger.info() - if texto != "": - texto = texto.replace(" ", "+") - item.url = host + "search?q=" + texto - item.extra = "busqueda" - try: - return list_all(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def findvideos(item): - logger.info() - - itemlist = [] - - data = httptools.downloadpage(item.url).data - - patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for url in matches: - data = httptools.downloadpage(url, headers={'Referer': item.url}).data - - itemlist.extend(servertools.find_video_items(data=data)) - - for video in itemlist: - - video.channel = item.channel - video.contentTitle = item.contentTitle - video.title = video.server.capitalize() - - # Opción "Añadir esta pelicula a la videoteca" - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append(Item(channel = item.channel, - title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url = item.url, - action = "add_pelicula_to_library", - extra = "findvideos", - contentTitle = item.contentTitle, - thumbnail = item.thumbnail - )) - - return itemlist \ No newline at end of file diff --git 
a/channels/porn/cinetemagay.json b/channels/porn/cinetemagay.json deleted file mode 100644 index a94f5844..00000000 --- a/channels/porn/cinetemagay.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "cinetemagay", - "name": "Cinetemagay", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "cinetemagay.png", - "banner": "cinetemagay.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git a/channels/porn/cinetemagay.py b/channels/porn/cinetemagay.py deleted file mode 100644 index ad5f5a6d..00000000 --- a/channels/porn/cinetemagay.py +++ /dev/null @@ -1,128 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import re - -from core import scrapertools -from core import servertools -from core import httptools -from core.item import Item -from platformcode import config, logger - -IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay') - - -def strip_tags(value): - return re.sub(r'<[^>]*?>', '', value) - - -def mainlist(item): - logger.info() - - itemlist = [] - itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay latinoamericano", - url="http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", - thumbnail="http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg")) - itemlist.append(Item(channel=item.channel, action="lista", title="Cine y cortos gay", - url="http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", - thumbnail="http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg")) - itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay online (México)", - url="http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", - thumbnail="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ")) - itemlist.append(Item(channel=item.channel, action="lista", 
title="Sentido gay", - url="http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1", - thumbnail="http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg")) - itemlist.append(Item(channel=item.channel, action="lista", title="PGPA", - url="http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", - thumbnail="http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4")) - - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - - # Descarga la pagina - data = httptools.downloadpage(item.url).data - - # Extrae las entradas (carpetas) - patronvideos = '<img .*?src="(.*?)"' - patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>" - matches = re.compile(patronvideos, re.DOTALL).findall(data) - - for match in matches: - scrapedtitle = match[3] - scrapedtitle = scrapedtitle.replace("'", "'") - scrapedtitle = scrapedtitle.replace(""", "'") - scrapedtitle = scrapedtitle.replace("&amp;", "'") - scrapedtitle = scrapedtitle.replace("&#39;", "'") - scrapedurl = match[2] - scrapedthumbnail = match[0] - imagen = "" - scrapedplot = match[1] - tipo = match[1] - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") - scrapedplot = "<" + scrapedplot - scrapedplot = scrapedplot.replace(">", ">") - scrapedplot = scrapedplot.replace("<", "<") - scrapedplot = scrapedplot.replace("</div>", "\n") - scrapedplot = scrapedplot.replace("<br />", "\n") - scrapedplot = scrapedplot.replace("&", "") - scrapedplot = scrapedplot.replace("nbsp;", "") - scrapedplot = strip_tags(scrapedplot) - itemlist.append( - Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - plot=scrapedurl + scrapedplot, folder=True)) - - variable = item.url.split("index=")[1] - variable = int(variable) - variable += 
100 - variable = str(variable) - variable_url = item.url.split("index=")[0] - url_nueva = variable_url + "index=" + variable - itemlist.append( - Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")", - url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva)) - - return itemlist - - -def detail(item): - logger.info() - itemlist = [] - - # Descarga la pagina - data = httptools.downloadpage(item.url).data - - data = data.replace("%3A", ":") - data = data.replace("%2F", "/") - data = data.replace("%3D", "=") - data = data.replace("%3", "?") - data = data.replace("%26", "&") - descripcion = "" - plot = "" - patrondescrip = 'SINOPSIS:(.*?)' - matches = re.compile(patrondescrip, re.DOTALL).findall(data) - if len(matches) > 0: - descripcion = matches[0] - descripcion = descripcion.replace(" ", "") - descripcion = descripcion.replace("<br/>", "") - descripcion = descripcion.replace("\r", "") - descripcion = descripcion.replace("\n", " ") - descripcion = descripcion.replace("\t", " ") - descripcion = re.sub("<[^>]+>", " ", descripcion) - descripcion = descripcion - try: - plot = unicode(descripcion, "utf-8").encode("iso-8859-1") - except: - plot = descripcion - - # Busca los enlaces a los videos de servidores - video_itemlist = servertools.find_video_items(data=data) - for video_item in video_itemlist: - itemlist.append(Item(channel=item.channel, action="play", server=video_item.server, - title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail, - plot=video_item.url, folder=False)) - - return itemlist diff --git a/channels/porn/cliphunter.json b/channels/porn/cliphunter.json deleted file mode 100644 index 98a6fe42..00000000 --- a/channels/porn/cliphunter.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "cliphunter", - "name": "cliphunter", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.cliphunter.com/gfx/new/logo.png", 
- "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/cliphunter.py b/channels/porn/cliphunter.py deleted file mode 100644 index 16f3d093..00000000 --- a/channels/porn/cliphunter.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - - -host = 'https://www.cliphunter.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/categories/All")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular/ratings/yesterday")) - itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - 
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<img class=".*?" 
src="([^"]+)".*?<div class="tr">(.*?)</div>.*?<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedtime,scrapedurl,scrapedtitle in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, - fanart=thumbnail, contentTitle = title )) - next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '"url"\:"(.*?)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - scrapedurl = scrapedurl.replace("\/", "/") - title = scrapedurl - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo")) - return itemlist - diff --git a/channels/porn/coomelonitas.json b/channels/porn/coomelonitas.json deleted file mode 100644 index e20668d1..00000000 --- a/channels/porn/coomelonitas.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "coomelonitas", - "name": "Coomelonitas", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.coomelonitas.com/wp-content/themes/3xTheme/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/coomelonitas.py b/channels/porn/coomelonitas.py deleted file mode 100644 index ae943a7d..00000000 --- a/channels/porn/coomelonitas.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: 
utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host ='http://www.coomelonitas.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host+ "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="all"(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for match in matches: - title = scrapertools.find_single_match(match,'title="([^"]+)"') - url = scrapertools.find_single_match(match,'<a href="([^"]+)"') - plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>') - thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"') - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, - fanart=thumbnail, 
thumbnail=thumbnail, plot=plot, viewmode="movie") ) - next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">') - if next_page!="": - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - diff --git a/channels/porn/cumlouder.json b/channels/porn/cumlouder.json deleted file mode 100644 index 10bcc031..00000000 --- a/channels/porn/cumlouder.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "cumlouder", - "name": "Cumlouder", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "cumlouder.png", - "banner": "cumlouder.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git a/channels/porn/cumlouder.py b/channels/porn/cumlouder.py deleted file mode 100644 index 4380a53a..00000000 --- a/channels/porn/cumlouder.py +++ /dev/null @@ -1,180 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urllib -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import config, logger - - -host = 'https://www.cumlouder.com' - -def mainlist(item): - logger.info() - itemlist = [] - config.set_setting("url_error", False, "cumlouder") - itemlist.append(item.clone(title="Últimos videos", action="videos", url= host + "/porn/")) - itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url=host + "/girls/")) - itemlist.append(item.clone(title="Listas", action="series", url= host + "/series/")) - itemlist.append(item.clone(title="Categorias", action="categorias", url= host + "/categories/")) - itemlist.append(item.clone(title="Buscar", action="search", url= host + "/search?q=%s")) - return itemlist - - -def search(item, texto): - logger.info() - item.url = item.url % texto - item.action = "videos" - try: - return videos(item) - except: - import traceback - logger.error(traceback.format_exc()) - return [] - - -def pornstars_list(item): - logger.info() - itemlist = [] - 
itemlist.append(item.clone(title="Mas Populares", action="pornstars", url=host + "/girls/1/")) - for letra in "abcdefghijklmnopqrstuvwxyz": - itemlist.append(item.clone(title=letra.upper(), url=urlparse.urljoin(item.url, letra), action="pornstars")) - return itemlist - - -def pornstars(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a girl-url=.*?' - patron += 'href="([^"]+)" title="([^"]+)">.*?' - patron += 'data-lazy="([^"]+)".*?' - patron += '<span class="ico-videos sprite"></span>([^<]+)</span>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, title, thumbnail, count in matches: - if "go.php?" in url: - url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) - thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(item.url, url) - if not thumbnail.startswith("https"): - thumbnail = "https:%s" % thumbnail - itemlist.append(item.clone(title="%s (%s)" % (title, count), url=url, action="videos", fanart=thumbnail, thumbnail=thumbnail)) - # Paginador - matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data) - if matches: - if "go.php?" in matches[0]: - url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(item.url, matches[0]) - itemlist.append(item.clone(title="Página Siguiente >>", url=url)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '<a tag-url=.*?' - patron += 'href="([^"]+)" title="([^"]+)".*?' - patron += 'data-lazy="([^"]+)".*?' - patron += '<span class="cantidad">([^<]+)</span>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, title, thumbnail, count in matches: - if "go.php?" 
in url: - url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) - thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(item.url, url) - if not thumbnail.startswith("https"): - thumbnail = "https:%s" % thumbnail - itemlist.append( - item.clone(title="%s (%s videos)" % (title, count), url=url, action="videos", fanart=thumbnail, thumbnail=thumbnail)) - # Paginador - matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data) - if matches: - if "go.php?" in matches[0]: - url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(item.url, matches[0]) - itemlist.append(item.clone(title="Página Siguiente >>", url=url)) - return itemlist - - -def series(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '<a onclick=.*?href="([^"]+)".*?\<img src="([^"]+)".*?h2 itemprop="name">([^<]+).*?p>([^<]+)</p>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, thumbnail, title, count in matches: - itemlist.append( - item.clone(title="%s (%s) " % (title, count), url=urlparse.urljoin(item.url, url), action="videos", fanart=thumbnail, thumbnail=thumbnail)) - # Paginador - matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data) - if matches: - if "go.php?" in matches[0]: - url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(item.url, matches[0]) - itemlist.append(item.clone(title="Página Siguiente >>", url=url)) - return itemlist - - -def videos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a class="muestra-escena" href="([^"]+)" title="([^"]+)".*?' - patron += 'data-lazy="([^"]+)".*?' 
- patron += '<span class="ico-minutos sprite"></span>([^<]+)</span>(.*?)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, title, thumbnail, duration,calidad in matches: - if "hd sprite" in calidad: - title="[COLOR yellow] %s [/COLOR][COLOR red] HD [/COLOR] %s" % (duration, title) - else: - title="[COLOR yellow] %s [/COLOR] %s" % (duration, title) - if "go.php?" in url: - url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) - thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(host, url) - if not thumbnail.startswith("https"): - thumbnail = "https:%s" % thumbnail - itemlist.append(item.clone(title=title, url=url, - action="play", thumbnail=thumbnail, contentThumbnail=thumbnail, - fanart=thumbnail, contentType="movie", contentTitle=title)) - # Paginador - nextpage = scrapertools.find_single_match(data, '<ul class="paginador"(.*?)</ul>') - matches = re.compile('<a href="([^"]+)" rel="nofollow">Next »</a>', re.DOTALL).findall(nextpage) - if not matches: - matches = re.compile('<li[^<]+<a href="([^"]+)">Next »</a[^<]+</li>', re.DOTALL).findall(nextpage) - if matches: - if "go.php?" in matches[0]: - url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) - else: - url = urlparse.urljoin(item.url, matches[0]) - itemlist.append(item.clone(title="Página Siguiente >>", url=url)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source src="([^"]+)" type=\'video/([^\']+)\' label=\'[^\']+\' res=\'([^\']+)\'' - url, type, res = re.compile(patron, re.DOTALL).findall(data)[0] - if "go.php?" 
in url: - url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) - elif not url.startswith("http"): - url = "https:" + url.replace("&", "&") - itemlist.append( - Item(channel='cumlouder', action="play", title='Video' + res, contentTitle=type.upper() + ' ' + res, url=url, - server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/czechvideo.json b/channels/porn/czechvideo.json deleted file mode 100644 index 84b1c775..00000000 --- a/channels/porn/czechvideo.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "czechvideo", - "name": "Czechvideo", - "active": false, - "adult": true, - "language": ["*"], - "thumbnail": "http://czechvideo.org/templates/Default/images/black75.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/czechvideo.py b/channels/porn/czechvideo.py deleted file mode 100644 index 23b444aa..00000000 --- a/channels/porn/czechvideo.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ - -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://czechvideo.org' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/tags/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data 
= scrapertools.find_single_match(data,'<div class="category">(.*?)</ul>') - patron = '<li><a href="(.*?)".*?>(.*?)</a></li>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="short-story">.*?' - patron += '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?' - patron += 'div class="short-time">(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.contentTitle = item.contentTitle - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/datoporn.json 
b/channels/porn/datoporn.json deleted file mode 100644 index 1a9f2211..00000000 --- a/channels/porn/datoporn.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "datoporn", - "name": "DatoPorn", - "language": ["*"], - "active": true, - "adult": true, - "thumbnail": "http://i.imgur.com/tBSWudd.png?1", - "banner": "datoporn.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git a/channels/porn/datoporn.py b/channels/porn/datoporn.py deleted file mode 100644 index 243e4a8c..00000000 --- a/channels/porn/datoporn.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import httptools -from core import scrapertools -from platformcode import config, logger - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all", contentType="movie", viewmode="movie")) - itemlist.append(item.clone(title="Buscar...", action="search", contentType="movie", viewmode="movie")) - return itemlist - - -def search(item, texto): - logger.info() - item.url = "http://dato.porn/?k=%s&op=search" % texto.replace(" ", "+") - return lista(item) - - -def lista(item): - logger.info() - itemlist = [] - data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) - patron = '<div class="videobox">\s*<a href="([^"]+)".*?' - patron += 'url\(\'([^\']+)\'.*?' - patron += '<span>(.*?)<\/span>.*?' 
- patron += 'class="title">(.*?)<\/a>' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches: - if "/embed-" not in scrapedurl: - scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html" - if not config.get_setting('unify'): - scrapedtitle = '[COLOR yellow] %s [/COLOR] %s' % (duration , scrapedtitle) - else: - scrapedtitle += ' gb' - scrapedtitle = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - scrapedtitle = scrapedtitle.replace(":", "'") - # logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + duration + ' / ' + scrapedtitle) - itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, server="datoporn", - fanart=scrapedthumbnail.replace("_t.jpg", ".jpg"), plot = "")) - next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next') - if next_page and itemlist: - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, numero, scrapedtitle in matches: - if numero: - scrapedtitle = "%s (%s)" % (scrapedtitle, numero) - itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail)) - return itemlist - diff --git a/channels/porn/elreyx.json b/channels/porn/elreyx.json deleted file mode 100644 index af3f59de..00000000 --- a/channels/porn/elreyx.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "elreyx", - "name": "elreyx", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.elreyx.com/template/images/logo.png", - "banner": "", 
- "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/elreyx.py b/channels/porn/elreyx.py deleted file mode 100644 index 8f6a20a7..00000000 --- a/channels/porn/elreyx.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import re -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = 'http://www.elreyx.com' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/peliculasporno.html") ) - itemlist.append( Item(channel=item.channel, title="Escenas" , action="lista", url=host + "/index.html")) - itemlist.append( Item(channel=item.channel, title="Productora" , action="categorias", url=host + "/index.html") ) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html") ) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search") ) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search-%s" % texto + ".html" - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if item.title == "Categorias" : - patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>' - else: - patron = '<a href="([^<]+)" title="View Category ([^<]+)">.*?</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - url="https:" + scrapedurl - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, 
action="lista", title=scrapedtitle, url=url, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if not "/peliculasporno" in item.url: - patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)">' - else: - patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedtitle,scrapedurl,scrapedthumbnail in matches: - scrapedplot = "" - url="https:" + scrapedurl - thumbnail="https:" + scrapedthumbnail - itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>') - if next_page == "": - next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>»</a>') - if next_page!= "": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, '<IFRAME SRC="(.*?)"') - if url == "": - url = scrapertools.find_single_match(data,'<iframe src="(.*?)"') - data = httptools.downloadpage(url).data - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/eporner.json b/channels/porn/eporner.json deleted file mode 100644 index 9bdf1de4..00000000 --- a/channels/porn/eporner.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "eporner", - "name": "Eporner", - 
"active": true, - "adult": true, - "language": ["*"], - "thumbnail": "eporner.png", - "banner": "eporner.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git a/channels/porn/eporner.py b/channels/porn/eporner.py deleted file mode 100644 index b82358c0..00000000 --- a/channels/porn/eporner.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from platformcode import logger - -host = 'http://www.eporner.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(item.clone(title="Últimos videos", action="videos", url=host + "/0/")) - itemlist.append(item.clone(title="Más visto", action="videos", url=host + "/most-viewed/")) - itemlist.append(item.clone(title="Mejor valorado", action="videos", url=host + "/top-rated/")) - itemlist.append(item.clone(title="Categorias", action="categorias", url=host + "/categories/")) - itemlist.append(item.clone(title="Pornstars", action="pornstars", url=host + "/pornstars/")) - itemlist.append(item.clone(title=" Alfabetico", action="pornstars_list", url=host + "/pornstars/")) - itemlist.append(item.clone(title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "-") - item.url = host + "/search/%s/" % texto - try: - return videos(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def pornstars_list(item): - logger.info() - itemlist = [] - for letra in "ABCDEFGHIJKLMNOPQRSTUVWXYZ": - itemlist.append(item.clone(title=letra, url=urlparse.urljoin(item.url, letra), action="pornstars")) - return itemlist - - -def pornstars(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="mbprofile">.*?' - patron += '<a href="([^"]+)" title="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<div class="mbtim"><span>Videos: </span>([^<]+)</div>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, title, thumbnail, count in matches: - itemlist.append( - item.clone(title="%s (%s videos)" % (title, count), url=urlparse.urljoin(item.url, url), action="videos", - thumbnail=thumbnail)) - # Paginador - next_page = scrapertools.find_single_match(data,"<a href='([^']+)' class='nmnext' title='Next page'>") - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="pornstars", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<span class="addrem-cat">.*?' - patron += '<a href="([^"]+)" title="([^"]+)">.*?' - patron +='<div class="cllnumber">([^<]+)</div>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, title, cantidad in matches: - url = urlparse.urljoin(item.url, url) - title = title + " " + cantidad - thumbnail = "" - if not thumbnail: - thumbnail = scrapertools.find_single_match(data,'<img src="([^"]+)" alt="%s"> % title') - itemlist.append(item.clone(title=title, url=url, action="videos", thumbnail=thumbnail)) - return sorted(itemlist, key=lambda i: i.title) - - -def videos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="mvhdico"><span>([^<]+)</span>.*?' - patron += '<a href="([^"]+)" title="([^"]+)" id="[^"]+">.*?' - patron += 'src="([^"]+)"[^>]+>.*?' 
- patron += '<div class="mbtim">([^<]+)</div>' - matches = re.compile(patron, re.DOTALL).findall(data) - for quality, url, title, thumbnail, duration in matches: - title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + quality + "[/COLOR] " +title - itemlist.append(item.clone(title=title, url=urlparse.urljoin(item.url, url), - action="play", thumbnail=thumbnail, contentThumbnail=thumbnail, - contentType="movie", contentTitle=title)) - # Paginador - next_page = scrapertools.find_single_match(data,"<a href='([^']+)' class='nmnext' title='Next page'>") - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="videos", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def int_to_base36(num): - """Converts a positive integer into a base36 string.""" - assert num >= 0 - digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'.lower() - - res = '' - while not res or num > 0: - num, i = divmod(num, 36) - res = digits[i] + res - return res - - -def play(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - - patron = "EP: { vid: '([^']+)', hash: '([^']+)'" - - vid, hash = re.compile(patron, re.DOTALL).findall(data)[0] - - hash = int_to_base36(int(hash[0:8], 16)) + int_to_base36(int(hash[8:16], 16)) + int_to_base36( - int(hash[16:24], 16)) + int_to_base36(int(hash[24:32], 16)) - - url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash) - jsondata = httptools.downloadpage(url).json - - for source in jsondata["sources"]["mp4"]: - url = jsondata["sources"]["mp4"][source]["src"] - title = source.split(" ")[0] - - itemlist.append(["%s %s [directo]" % (title, url[-4:]), url]) - - return sorted(itemlist, key=lambda i: int(i[0].split("p")[0])) diff --git a/channels/porn/eroticage.json b/channels/porn/eroticage.json deleted file mode 100644 index e45b4f9b..00000000 --- a/channels/porn/eroticage.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": 
"eroticage", - "name": "eroticage", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://eroticage.net/wp-content/themes/oz-movie-v3/img/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/eroticage.py b/channels/porn/eroticage.py deleted file mode 100644 index fab51da2..00000000 --- a/channels/porn/eroticage.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://www.eroticage.net' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"') - patron = '<a href="(.*?)".*?>(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - 
patron = '<div class="titleFilm"><a href="([^"]+)">([^"]+)</a>.*?src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - contentTitle = scrapedtitle - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, - plot=plot, fanart=scrapedthumbnail, contentTitle=contentTitle )) - next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.thumbnail = item.thumbnail - videochannel=item.channel - return itemlist - diff --git a/channels/porn/eroticasonlinetv.json b/channels/porn/eroticasonlinetv.json deleted file mode 100644 index fdcdd76f..00000000 --- a/channels/porn/eroticasonlinetv.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "eroticasonlinetv", - "name": "eroticasonlinetv", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.peliculaseroticasonline.tv/wp-content/themes/wpeliculaseroticasonlinetv/favicon.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - - diff --git a/channels/porn/eroticasonlinetv.py b/channels/porn/eroticasonlinetv.py deleted file mode 100644 index b3d7f3d6..00000000 --- a/channels/porn/eroticasonlinetv.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import re -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from 
core.item import Item -from platformcode import logger -from platformcode import config - -host = 'http://www.peliculaseroticasonline.tv' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - plot = "" - url = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = scrapedtitle) ) - next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente »</a>') - if next_page: - next_page = urlparse.urljoin(item.url, next_page) - 
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page )) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"') - url = urlparse.urljoin(item.url, url) - data = httptools.downloadpage(url).data - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/erotik.json b/channels/porn/erotik.json deleted file mode 100644 index 8724bd2f..00000000 --- a/channels/porn/erotik.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "erotik", - "name": "Erotik", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.youfreeporntube.com/uploads/custom-logo.png", - "banner": "http://www.youfreeporntube.com/uploads/custom-logo.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/erotik.py b/channels/porn/erotik.py deleted file mode 100644 index 55f089fb..00000000 --- a/channels/porn/erotik.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger - -host = "https://www.youfreeporntube.net" - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", - url= host + "/newvideos.html?&page=1")) - itemlist.append(Item(channel=item.channel, action="lista", title="Populares", - url=host + "/topvideos.html?page=1")) - itemlist.append( - Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/browse.html")) - - itemlist.append(Item(channel=item.channel, 
action="search", title="Buscar", - url=host + "/search.php?keywords=")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = "{0}{1}".format(item.url, texto) - try: - return lista(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}", "", data) - patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, actriz in matches: - itemlist.append(Item(channel=item.channel, action="lista", title=actriz, url=url)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}", "", data) - patron = '<li><div class=".*?' - patron += '<a href="([^"]+)".*?' 
- patron += '<img src="([^"]+)".*?alt="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - itemlist = [] - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - url = urlparse.urljoin(item.url, scrapedurl) - thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) - title = scrapedtitle.strip() - itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail, fanart=thumbnail, title=title, - url=url, - viewmode="movie", folder=True)) - paginacion = scrapertools.find_single_match(data, - '<li class="active">.*?</li>.*?<a href="([^"]+)">') - if paginacion: - paginacion = urlparse.urljoin(item.url,paginacion) - itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", - url= paginacion)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, '<div id="video-wrapper">.*?<iframe.*?src="([^"]+)"') - itemlist.append(item.clone(action="play", title=url, url=url )) - itemlist = servertools.get_servers_itemlist(itemlist) - return itemlist - - diff --git a/channels/porn/fapality.json b/channels/porn/fapality.json deleted file mode 100644 index 564fc8d4..00000000 --- a/channels/porn/fapality.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "fapality", - "name": "fapality", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://i.imgur.com/Orguh85.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/fapality.py b/channels/porn/fapality.py deleted file mode 100644 index d0527c27..00000000 --- a/channels/porn/fapality.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core 
import httptools - -host = 'https://fapality.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest/")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/popular/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/")) - itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/pornstars/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="item"><a href="([^"]+)" title="([^"]+)">.*?' - patron += '<img src="([^"]+)">.*?' 
- patron += '<div class="right">([^"]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - if "/categories/" in item.url: - itemlist = sorted(itemlist, key=lambda i: i.title) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li class="masonry-item item ".*?' - patron += '<a href="([^"]+)" class="kt_imgrc popfire" title="([^"]+)" >.*?' 
- patron += '<img src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - title = scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle) ) - next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source id="video_source_1" src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl in matches: - url = scrapedurl - itemlist.append(item.clone(action="play", title=url, contentTitle = item.title, url=url)) - return itemlist - diff --git a/channels/porn/fetishshrine.json b/channels/porn/fetishshrine.json deleted file mode 100644 index 1aa17d67..00000000 --- a/channels/porn/fetishshrine.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "fetishshrine", - "name": "fetishshrine", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.fetishshrine.com/images/foot-logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/fetishshrine.py b/channels/porn/fetishshrine.py deleted file mode 100644 index a0a3971d..00000000 --- a/channels/porn/fetishshrine.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core 
import servertools -from core import httptools - -host = 'https://www.fetishshrine.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" title="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' - patron += '<span class="vids">(\d+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" itemprop="url">.*?' - patron += '<img src="([^"]+)" alt="([^"]+)">.*?' 
- patron += '<span itemprop="duration" class="length">(.*?)</span>(.*?)<span class="thumb-info">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,calidad in matches: - url = scrapedurl - if ">HD<" in calidad: - title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " +scrapedtitle - else: - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - plot=plot, fanart=scrapedthumbnail, contentTitle = contentTitle )) - next_page = scrapertools.find_single_match(data,'<li><a data=\'\d+\' href="([^"]+)" title="Next">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = 'video_url: \'([^\']+)\'' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/filmoviXXX.json b/channels/porn/filmoviXXX.json deleted file mode 100644 index 051c0fbb..00000000 --- a/channels/porn/filmoviXXX.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "filmoviXXX", - "name": "filmoviXXX", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.filmovix.net/wp-content/themes/Video/skins/1-default/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/filmoviXXX.py b/channels/porn/filmoviXXX.py deleted file mode 100644 index 
5993b540..00000000 --- a/channels/porn/filmoviXXX.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -# BLOQUEO ESET INTERNET SECURITY -def mainlist(item): - logger.info() - itemlist = [] - if item.url=="": - item.url = "http://www.filmovix.net/videoscategory/porno/" - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<h1 class="cat_head">XXX</h1>(.*?)<h3> Novo dodato </h3>') - patron = '<li class="clearfix">.*?' - patron += 'src="([^"]+)".*?' - patron += '<p class="title"><a href="([^"]+)" rel="bookmark" title="([^"]+)">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle in matches: - contentTitle = scrapedtitle - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle=contentTitle)) - next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="mainlist", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/filmpornoita.json b/channels/porn/filmpornoita.json deleted file mode 100644 index 27329265..00000000 --- 
a/channels/porn/filmpornoita.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "filmpornoita", - "name": "filmpornoita", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.filmpornoita.net/wp-content/uploads/2015/01/header1.jpg", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/filmpornoita.py b/channels/porn/filmpornoita.py deleted file mode 100644 index 30171546..00000000 --- a/channels/porn/filmpornoita.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import re -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = 'http://www.streamxxxx.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url= host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host)) - return itemlist - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href=\'([^\']+)\' class=\'tag-link-.*?>([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="post" id="post-\d+">.*?' - patron += '<a href="([^"]+)" title="([^"]+)">.*?' 
- patron += 'src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") ) - next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">Next') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - diff --git a/channels/porn/foxtube.json b/channels/porn/foxtube.json deleted file mode 100644 index 6d58342d..00000000 --- a/channels/porn/foxtube.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "foxtube", - "name": "foxtube", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://fxtimg.com/xlogo_.png.pagespeed.ic.doVRQMV5ub.png|Referer=http://es.foxtube.com", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/foxtube.py b/channels/porn/foxtube.py deleted file mode 100644 index ae2fae11..00000000 --- a/channels/porn/foxtube.py +++ /dev/null @@ -1,123 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://es.foxtube.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="PornStar" , action="catalogo", url=host + '/actrices/')) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - - 
itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/buscador/%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="tco5" href="([^"]+)">.*?' - patron += 'data-origen="([^"]+)" alt="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - # <a class="bgco2 tco3" rel="next" href="/actrices/2/">></a> - next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li class="bgco1"><a class="tco2" href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - 
logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if "/actrices/" in item.url: - data=scrapertools.find_single_match(data,'<section class="container">(.*?)>Actrices similares</h3>') - patron = '<a class="thumb tco1" href="([^"]+)">.*?' - patron += 'src="([^"]+)".*?' - patron += 'alt="([^"]+)".*?' - patron += '<span class="t">(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - contentTitle = scrapedtitle - time = scrapertools.find_single_match(duracion, '<i class="m tc2">([^"]+)</i>') - if not 'HD' in duracion : - title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle - else: - title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail + "|Referer=%s" %host - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data,'<iframe title="video" src="([^"]+)"') - url = url.replace("https://flashservice.xvideos.com/embedframe/", "https://www.xvideos.com/video") + "/" - data = httptools.downloadpage(url).data - patron = 'html5player.setVideoHLS\\(\'([^\']+)\'' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - scrapedurl = scrapedurl.replace("\/", "/") - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, 
- thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/freeporn.json b/channels/porn/freeporn.json deleted file mode 100644 index a6fbb792..00000000 --- a/channels/porn/freeporn.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "freeporn", - "name": "freeporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://frprn.com/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/freeporn.py b/channels/porn/freeporn.py deleted file mode 100644 index c2ef1df2..00000000 --- a/channels/porn/freeporn.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://frprn.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-raped/")) - itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = 
re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li class="thumb thumb-\w+">.*?' - patron += '<a href="([^"]+)">.*?' - patron += '<img class="lazy" data-original="([^"]+)".*?' - patron += '<div class="title">(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - title = scrapertools.find_single_match(scrapedtitle,'<div class="text">([^<]+)<') - if "/categories/" in item.url: - cantidad = scrapertools.find_single_match(scrapedtitle,'<div class="count">(\d+)</div>') - scrapedtitle = scrapertools.find_single_match(scrapedtitle,'<div class="name">([^<]+)</div>') - title = scrapedtitle + " (" + cantidad + ")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="thumb">.*?' - patron += '<a href="([^"]+)".*?' - patron += '<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?' 
- patron += '<span class="duration">([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - year = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<meta property="og:video" content="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - title = scrapedurl - itemlist.append(item.clone(action="play", title=title, contentTitle = scrapedurl, url=scrapedurl)) - return itemlist - diff --git a/channels/porn/freepornstreams.json b/channels/porn/freepornstreams.json deleted file mode 100644 index f3bf0f7a..00000000 --- a/channels/porn/freepornstreams.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "freepornstreams", - "name": "freepornstreams", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://freepornstreams.org/wp-content/uploads/2016/11/FPS_Logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/freepornstreams.py b/channels/porn/freepornstreams.py deleted file mode 100644 index f5eae9c7..00000000 --- a/channels/porn/freepornstreams.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import 
urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://freepornstreams.org' #es http://xxxstreams.org - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/free-full-porn-movies/")) - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/free-stream-porn/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if item.title == "Categorias" : - data = scrapertools.find_single_match(data,'>Top Tags(.*?)</ul>') - else: - data = scrapertools.find_single_match(data,'>Top Sites</a>(.*?)</aside>') - patron = '<a href="([^"]+)">(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - if not "Featured" in scrapedtitle: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = scrapedurl.replace ("http://freepornstreams.org/freepornst/stout.php?s=100,75,65:*&u=" , "") - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = 
httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<article id="post-\d+".*?' - patron += '<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?' - patron += '<img src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - if '/HD' in scrapedtitle : title= "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - elif 'SD' in scrapedtitle : title= "[COLOR red]" + "SD" + "[/COLOR] " + scrapedtitle - elif 'FullHD' in scrapedtitle : title= "[COLOR red]" + "FullHD" + "[/COLOR] " + scrapedtitle - elif '1080' in scrapedtitle : title= "[COLOR red]" + "1080p" + "[/COLOR] " + scrapedtitle - else: title = scrapedtitle - thumbnail = scrapedthumbnail.replace("jpg#", "jpg") - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=title) ) - next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = '<a href="([^"]+)" rel="nofollow"[^<]+>(?:Streaming|Download)' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - if not "ubiqfile" in url: - itemlist.append(item.clone(action='play',title="%s", url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist diff --git a/channels/porn/gotporn.json b/channels/porn/gotporn.json deleted file mode 100755 index 96df587b..00000000 --- a/channels/porn/gotporn.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "gotporn", - "name": "gotporn", - "active": 
true, - "adult": true, - "language": ["*"], - "thumbnail": "https://cdn2-static-cf.gotporn.com/desktop/img/gotporn-logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/gotporn.py b/channels/porn/gotporn.py deleted file mode 100755 index fc42255d..00000000 --- a/channels/porn/gotporn.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.gotporn.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?page=1")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated?page=1")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-viewed?page=1")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest?page=1")) - - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels?page=1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/results?search_query=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - - - patron = '<a href="([^"]+)">' 
- patron += '<span class="text">([^<]+)</span>' - patron += '<span class="num">([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = "%s %s" % (scrapedtitle,cantidad) - scrapedurl = scrapedurl + "?page=1" - thumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=thumbnail , plot=scrapedplot) ) - return itemlist - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - logger.debug(data) - patron = '<header class="clearfix" itemscope>.*?' - patron += '<a href="([^"]+)".*?' - patron += '<img src="([^"]+)" alt="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = scrapedurl + "?page=1" - thumbnail = "https:" + scrapedthumbnail - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=thumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn btn-secondary"><span class="text">Next') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<li class="video-item poptrigger".*?' - patron += 'href="([^"]+)" data-title="([^"]+)".*?' - patron += '<span class="duration">(.*?)</span>.*?' - patron += 'src=\'([^\']+)\'.*?' 
- patron += '<h3 class="video-thumb-title(.*?)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedtime,scrapedthumbnail,quality in matches: - scrapedtime = scrapedtime.strip() - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - if quality: - title = "[COLOR yellow]%s[/COLOR] [COLOR red]HD[/COLOR] %s" % (scrapedtime,scrapedtitle) - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot,)) - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn btn-secondary') - if "categories" in item.url: - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="btn btn-secondary paginate-show-more') - if "search_query" in item.url: - next_page = scrapertools.find_single_match(data, '<link rel=\'next\' href="([^"]+)">') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<source src="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - url += "|Referer=%s" % host - itemlist.append(item.clone(action="play", title = item.title, url=url )) - return itemlist - diff --git a/channels/porn/hclips.json b/channels/porn/hclips.json deleted file mode 100644 index 8207dd3d..00000000 --- a/channels/porn/hclips.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "hclips", - "name": "hclips", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.hclips.com/apple-touch-icon-152x152.png?v=3", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git 
a/channels/porn/hclips.py b/channels/porn/hclips.py deleted file mode 100644 index 9ae7ce89..00000000 --- a/channels/porn/hclips.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.hclips.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/?")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/?")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return peliculas(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" class="thumb">.*?' - patron += 'src="([^"]+)".*?' - patron += '<strong class="title">([^"]+)</strong>.*?' 
- patron += '<b>(.*?)</b>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches: - scrapedplot = "" - title = scrapedtitle + " \(" + vidnum + "\)" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a href="([^"]+)" class="thumb">.*?' - patron += '<img src="([^"]+)" alt="([^"]+)".*?' - patron += '<span class="dur">(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches: - title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - thumbnail=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page">Next</a>') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"') - video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"') - partes = video_url.split('||') - video_url = decode_url(partes[0]) - video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url) - video_url += '&' if '?' in video_url else '?' 
- video_url += 'lip=' + partes[2] + '<=' + partes[3] - itemlist.append(item.clone(action="play", title=item.title, url=video_url)) - return itemlist - - -def decode_url(txt): - _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' - reto = ''; n = 0 - # En las dos siguientes líneas, ABCEM ocupan 2 bytes cada letra! El replace lo deja en 1 byte. !!!!: АВСЕМ (10 bytes) ABCEM (5 bytes) - txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt) - txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M') - - while n < len(txt): - a = _0x52f6x15.index(txt[n]) - n += 1 - b = _0x52f6x15.index(txt[n]) - n += 1 - c = _0x52f6x15.index(txt[n]) - n += 1 - d = _0x52f6x15.index(txt[n]) - n += 1 - - a = a << 2 | b >> 4 - b = (b & 15) << 4 | c >> 2 - e = (c & 3) << 6 | d - reto += chr(a) - if c != 64: reto += chr(b) - if d != 64: reto += chr(e) - - return urllib.unquote(reto) - diff --git a/channels/porn/hdzog.json b/channels/porn/hdzog.json deleted file mode 100644 index fe66dbd5..00000000 --- a/channels/porn/hdzog.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "hdzog", - "name": "hdzog", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.hdzog.com/apple-touch-icon-120x120.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/hdzog.py b/channels/porn/hdzog.py deleted file mode 100644 index 09ef0158..00000000 --- a/channels/porn/hdzog.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.hdzog.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + 
"/new/")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular/")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<ul class="cf">(.*?)</ul>') - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li>.*?<a href="([^"]+)".*?' - patron += '<img class="thumb" src="([^"]+)" alt="([^"]+)".*?' - patron += '<span class="videos-count">(\d+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches: - scrapedplot = "" - - url= scrapedurl + "?sortby=post_date" - title = scrapedtitle + " \(" + vidnum + "\)" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>') - patron = '<li>.*?<a href="([^"]+)".*?' - patron += 'src="([^"]+)" alt="([^"]+)".*?' 
- patron += '<span class="time">(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches: - contentTitle = scrapedtitle - title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - thumbnail=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page" data-page-num="\d+">Next page »</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info(item) - itemlist = [] - data = httptools.downloadpage(item.url).data - video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"') - video_url += scrapertools.find_single_match(data, 'video_url\+="([^"]*)"') - partes = video_url.split('||') - video_url = decode_url(partes[0]) - video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url) - video_url += '&' if '?' in video_url else '?' - video_url += 'lip=' + partes[2] + '<=' + partes[3] - itemlist.append(item.clone(action="play", title=item.title, url=video_url)) - return itemlist - - -def decode_url(txt): - _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' - reto = ''; n = 0 - # En las dos siguientes líneas, ABCEM ocupan 2 bytes cada letra! El replace lo deja en 1 byte. 
!!!!: АВСЕМ (10 bytes) ABCEM (5 bytes) - txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt) - txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M') - - while n < len(txt): - a = _0x52f6x15.index(txt[n]) - n += 1 - b = _0x52f6x15.index(txt[n]) - n += 1 - c = _0x52f6x15.index(txt[n]) - n += 1 - d = _0x52f6x15.index(txt[n]) - n += 1 - - a = a << 2 | b >> 4 - b = (b & 15) << 4 | c >> 2 - e = (c & 3) << 6 | d - reto += chr(a) - if c != 64: reto += chr(b) - if d != 64: reto += chr(e) - - return urllib.unquote(reto) - diff --git a/channels/porn/hellporno.json b/channels/porn/hellporno.json deleted file mode 100644 index bbb61486..00000000 --- a/channels/porn/hellporno.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "hellporno", - "name": "hellporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://hellporno.com/apple-touch-icon-152x152.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/hellporno.py b/channels/porn/hellporno.py deleted file mode 100644 index 0dab3be3..00000000 --- a/channels/porn/hellporno.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://hellporno.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/?page=1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % 
texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)">.*?' - patron += '<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?' - patron += '<span>(\d+) videos</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias" , title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?' - patron += '<span class="time">([^<]+)</span>.*?' 
- patron += '<video muted poster="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,duracion,scrapedthumbnail in matches: - url = scrapedurl - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page »</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data,'<source data-fluid-hd src="([^"]+)/?br=\d+"') - if scrapedurl=="": - scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)/?br=\d+"') - itemlist.append(item.clone(action="play", title=scrapedurl, url=scrapedurl)) - return itemlist - diff --git a/channels/porn/hentaiid.json b/channels/porn/hentaiid.json deleted file mode 100644 index 0519590a..00000000 --- a/channels/porn/hentaiid.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "hentaiid", - "name": "Hentai ID", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.hentai-id.tv/wp-content/themes/moviescript/assets/img/logo.png", - "banner": "http://www.hentai-id.tv/wp-content/themes/moviescript/assets/img/background.jpg", - "categories": [ - "adult" - ] -} diff --git a/channels/porn/hentaiid.py b/channels/porn/hentaiid.py deleted file mode 100644 index 495fae4d..00000000 --- a/channels/porn/hentaiid.py +++ /dev/null @@ -1,147 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from 
core.item import Item -from platformcode import logger - -CHANNEL_HOST = "http://hentai-id.tv/" - - -def mainlist(item): - logger.info() - - itemlist = list() - itemlist.append(Item(channel=item.channel, action="series", title="Novedades", - url=urlparse.urljoin(CHANNEL_HOST, "archivos/h2/"), extra="novedades")) - itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url=CHANNEL_HOST)) - itemlist.append(Item(channel=item.channel, action="series", title="Sin Censura", - url=urlparse.urljoin(CHANNEL_HOST, "archivos/sin-censura/"))) - itemlist.append(Item(channel=item.channel, action="series", title="High Definition", - url=urlparse.urljoin(CHANNEL_HOST, "archivos/high-definition/"))) - itemlist.append(Item(channel=item.channel, action="series", title="Mejores Hentais", - url=urlparse.urljoin(CHANNEL_HOST, "archivos/ranking-hentai/"))) - - return itemlist - - -def generos(item): - logger.info() - - itemlist = [] - data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data) - - pattern = 'id="hentai2"><div[^>]+>(.*?)</div></div>' - data = scrapertools.find_single_match(data, pattern) - - patron = 'href="([^"]+)"[^>]+>(.*?)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for url, title in matches: - # logger.debug("title=[{0}], url=[{1}]".format(title, url)) - itemlist.append(Item(channel=item.channel, action="series", title=title, url=url)) - - return itemlist - - -def series(item): - logger.info() - - data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data) - - pattern = "<div class='wp-pagenavi'>(.*?)</div>" - pagination = scrapertools.find_single_match(data, pattern) - - pattern = '<div class="col-xs-12 col-md-12 col-lg-9px-3"><ul>(.*?)</ul><div class="clearfix">' - data = scrapertools.find_single_match(data, pattern) - - pattern = '<a href="([^"]+)".*?<img src="([^"]+)" title="([^"]+)"' - matches = re.compile(pattern, re.DOTALL).findall(data) - itemlist = [] - - if item.extra == 
"novedades": - action = "findvideos" - else: - action = "episodios" - - for url, thumbnail, title in matches: - contentTitle = title - show = title - # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) - itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, - show=show, fanart=thumbnail, folder=True)) - - if pagination: - page = scrapertools.find_single_match(pagination, '>(?:Page|Página)\s*(\d+)\s*(?:of|de)\s*\d+<') - pattern = 'href="([^"]+)">%s<' % (int(page) + 1) - url_page = scrapertools.find_single_match(pagination, pattern) - - if url_page: - itemlist.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_page)) - - return itemlist - - -def episodios(item): - logger.info() - itemlist = [] - - data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data) - pattern = '<div class="box-entry-title text-center">Lista de Capítulos</div>(.*?)</div></div>' - - data = scrapertools.find_single_match(data, pattern) - patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle in matches: - title = scrapertools.unescape(scrapedtitle) - url = urlparse.urljoin(item.url, scrapedurl) - thumbnail = item.thumbnail - plot = item.plot - - # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, - thumbnail=thumbnail, plot=plot, - fanart=thumbnail)) - - return itemlist - - -def findvideos(item): - logger.info() - - data = httptools.downloadpage(item.url).data - video_urls = [] - down_urls = [] - patron = '<(?:iframe)?(?:IFRAME)?\s*(?:src)?(?:SRC)?="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - - for url in matches: - if 'goo.gl' in url or 'tinyurl' in url: - video = httptools.downloadpage(url, follow_redirects=False, 
only_headers=True).headers["location"] - video_urls.append(video) - else: - video_urls.append(url) - paste = scrapertools.find_single_match(data, 'https://gpaste.us/([a-zA-Z0-9]+)') - if paste: - try: - new_data = httptools.downloadpage('https://gpaste.us/'+paste).data - - bloq = scrapertools.find_single_match(new_data, 'id="input_text">(.*?)</div>') - matches = bloq.split('<br>') - for url in matches: - down_urls.append(url) - except: - pass - video_urls.extend(down_urls) - from core import servertools - itemlist = servertools.find_video_items(data=",".join(video_urls)) - for videoitem in itemlist: - videoitem.contentTitle = item.contentTitle - videoitem.channel = item.channel - videoitem.thumbnail = item.thumbnail - - return itemlist diff --git a/channels/porn/hotmovs.json b/channels/porn/hotmovs.json deleted file mode 100644 index dd056521..00000000 --- a/channels/porn/hotmovs.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "hotmovs", - "name": "hotmovs", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://hotmovs.com/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/hotmovs.py b/channels/porn/hotmovs.py deleted file mode 100644 index 37ec2078..00000000 --- a/channels/porn/hotmovs.py +++ /dev/null @@ -1,147 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://hotmovs.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/most-popular/?sort_by=video_viewed_week")) - itemlist.append( 
Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/?sort_by=rating_week")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/?sort_by=cs_viewed")) - itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/models/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/?sort_by=title")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="thumbnail" href="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' - patron += '<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?' 
- patron += '<h5>([^"]+)</h5>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: - scrapedplot = "" - cantidad = cantidad.replace(" ", "") - scrapedtitle = scrapedtitle + " (" + cantidad +")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<i class="mdi mdi-video"></i>([^"]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - cantidad = cantidad.replace(" ", "") - scrapedtitle = scrapedtitle + " (" + cantidad +")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<article class="item" data-video-id="([^"]+)">.*?src="([^"]+)" alt="([^"]+)".*?<div class="thumbnail__info__right">(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - url = urlparse.urljoin(item.url,"/embed/" + scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - plot=plot, contentTitle = title)) - next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"') - video_url += 
scrapertools.find_single_match(data, 'video_url\+=\'([^\']+)\'') - partes = video_url.split('||') - video_url = decode_url(partes[0]) - video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url) - video_url += '&' if '?' in video_url else '?' - video_url += 'lip=' + partes[2] + '<=' + partes[3] - itemlist.append(item.clone(action="play", title=item.title, url=video_url)) - return itemlist - - -def decode_url(txt): - _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' - reto = ''; n = 0 - # En las dos siguientes líneas, ABCEM ocupan 2 bytes cada letra! El replace lo deja en 1 byte. !!!!: АВСЕМ (10 bytes) ABCEM (5 bytes) - txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt) - txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M') - - while n < len(txt): - a = _0x52f6x15.index(txt[n]) - n += 1 - b = _0x52f6x15.index(txt[n]) - n += 1 - c = _0x52f6x15.index(txt[n]) - n += 1 - d = _0x52f6x15.index(txt[n]) - n += 1 - - a = a << 2 | b >> 4 - b = (b & 15) << 4 | c >> 2 - e = (c & 3) << 6 | d - reto += chr(a) - if c != 64: reto += chr(b) - if d != 64: reto += chr(e) - - return urllib.unquote(reto) - diff --git a/channels/porn/javlin.json b/channels/porn/javlin.json deleted file mode 100644 index 31aa65d0..00000000 --- a/channels/porn/javlin.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "javlin", - "name": "javlin", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://javl.in/wp-content/uploads/2015/07/favicon1.ico", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/javlin.py b/channels/porn/javlin.py deleted file mode 100644 index 6ab1d1f7..00000000 --- a/channels/porn/javlin.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import re -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from 
platformcode import logger -from platformcode import config - -host = 'http://javl.in' - -# BLOQUEO ANTIVIRUS - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="lista" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar" , action="search")) - return itemlist - - -def search(item, texto): - logger.info("pelisalacarta.gmobi mainlist") - texto = texto.replace(" ", "+") - item.url = host+ "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<option class="level-0" value="([^"]+)">([^"]+)  \((.*?)\)<' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,number in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = str(scrapedtitle) + " ("+ str(number) + ")" - scrapedurl = "http://javl.in/?cat=" + scrapedurl - thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<span class=\'currenttext\'>.*?href=\'([^\']+)\' class=\'inactive\'') - if 
next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel , action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - diff --git a/channels/porn/javtasty.json b/channels/porn/javtasty.json deleted file mode 100644 index 82151579..00000000 --- a/channels/porn/javtasty.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "id": "javtasty", - "name": "JavTasty", - "language": ["*"], - "active": true, - "adult": true, - "thumbnail": "http://i.imgur.com/OTYwbAa.png?1", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "menu_info", - "type": "bool", - "label": "Mostrar menú antes de reproducir con imágenes", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/channels/porn/javtasty.py b/channels/porn/javtasty.py deleted file mode 100644 index 46c41026..00000000 --- a/channels/porn/javtasty.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- - -import urlparse - -from core import httptools -from core import scrapertools -from platformcode import config, logger - -host = "https://www.javwhores.com" - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/")) - itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/")) - itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/")) - itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/")) - itemlist.append(item.clone(title="Buscar...", action="search")) - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, 
texto): - logger.info() - item.url = "%s/search/%s/" % (host, texto) - item.extra = texto - try: - return lista(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - action = "play" - if config.get_setting("menu_info", "javtasty"): - action = "menu_info" - # PURGA los PRIVATE - patron = 'div class="video-item\s+".*?href="([^"]+)".*?' - patron += 'data-original="([^"]+)" ' - patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches: - scrapedurl = urlparse.urljoin(host, scrapedurl) - scrapedtitle = scrapedtitle.strip() - if duration: - scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle) - if '>HD<' in quality: - scrapedtitle += " [COLOR red][HD][/COLOR]" - itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail)) - # Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, 'next"><a href="([^"]+)') - if next_page: - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=host + next_page)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '(?s)<a class="item" href="([^"]+)".*?' 
- patron += 'src="([^"]+)" ' - patron += 'alt="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - scrapedurl = urlparse.urljoin(host, scrapedurl) - scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) - itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - videourl = scrapertools.find_single_match(data, "video_alt_url2:\s*'([^']+)'") - if videourl: - itemlist.append(['.mp4 HD [directo]', videourl]) - videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'") - if videourl: - itemlist.append(['.mp4 HD [directo]', videourl]) - videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'") - if videourl: - itemlist.append(['.mp4 [directo]', videourl]) - if item.extra == "play_menu": - return itemlist, data - return itemlist - - -def menu_info(item): - logger.info() - itemlist = [] - video_urls, data = play(item.clone(extra="play_menu")) - itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls)) - matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)" class="item" rel="screenshots"') - for i, img in enumerate(matches): - if i == 0: - continue - title = "Imagen %s" % (str(i)) - itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img)) - return itemlist diff --git a/channels/porn/javus.json b/channels/porn/javus.json deleted file mode 100644 index 17595685..00000000 --- a/channels/porn/javus.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "javus", - "name": "javus.net", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://s15.postimg.cc/pzd3h4vy3/javus.png", - "banner": "https://s21.postimg.cc/5pqzedp2f/javus_banner.png", - "categories": [ - "adult" - ], 
- "settings": [ - ] -} \ No newline at end of file diff --git a/channels/porn/javus.py b/channels/porn/javus.py deleted file mode 100644 index 83979eb7..00000000 --- a/channels/porn/javus.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger - -host = 'http://javus.net/' - - -def mainlist(item): - if item.url == "": - item.url = host - - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a href="([^"]+)" title="([^"]+)" rel="nofollow" class="post-image post-image-left".*?\s*<div class="featured-thumbnail"><img width="203" height="150" src="([^"]+)" class="attachment-featured size-featured wp-post-image" alt="" title="" \/><\/div>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: - url = scrapedurl - title = scrapedtitle.decode('utf-8') - thumbnail = scrapedthumbnail - fanart = '' - - itemlist.append( - Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail)) - - # Paginacion - title = '' - siguiente = scrapertools.find_single_match(data, "<a rel='nofollow' href='([^']+)' class='inactive'>Next <") - ultima = scrapertools.find_single_match(data, "<a rel='nofollow' class='inactive' href='([^']+)'>Last <") - if siguiente != ultima: - titlen = 'Pagina Siguiente >>> ' - fanart = '' - itemlist.append(Item(channel=item.channel, action="mainlist", title=titlen, url=siguiente, fanart=fanart)) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = item.url + texto - - if texto != '': - return todas(item) - else: - return [] diff --git a/channels/porn/javwhores.json b/channels/porn/javwhores.json deleted file mode 100644 index 46a2bf11..00000000 --- a/channels/porn/javwhores.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": 
"javwhores", - "name": "javwhores", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.javwhores.com/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/javwhores.py b/channels/porn/javwhores.py deleted file mode 100644 index 6746f9d9..00000000 --- a/channels/porn/javwhores.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - - -host = 'https://www.javwhores.com/' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?' - patron += '<img class="thumb" src="([^"]+)".*?' 
- patron += '<div class="videos">([^"]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return sorted(itemlist, key=lambda i: i.title) - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="video-item ">.*?' - patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?' - patron += 'data-original="([^"]+)".*?' - patron += '<span class="ico-fav-1(.*?)<p class="inf">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - time = scrapertools.find_single_match(duracion, '<i class="fa fa-clock-o"></i>([^"]+)</div>') - if not 'HD' in duracion : - title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle - else: - title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - plot=plot, contentTitle = title)) - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"') - if "#videos" in next_page: - next_page = scrapertools.find_single_match(data, 'data-parameters="sort_by:post_date;from:(\d+)">Next') - next = scrapertools.find_single_match(item.url, '(.*?/)\d+') - next_page = next + "%s/" % next_page - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title= next_page, text_color="blue", url=next_page ) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - 
data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'') - - itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - - diff --git a/channels/porn/jizzbunker.json b/channels/porn/jizzbunker.json deleted file mode 100644 index 2f764fc9..00000000 --- a/channels/porn/jizzbunker.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "jizzbunker", - "name": "jizzbunker", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://s0.cdn3x.com/jb/i/logo-new.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - - diff --git a/channels/porn/jizzbunker.py b/channels/porn/jizzbunker.py deleted file mode 100644 index d6a22407..00000000 --- a/channels/porn/jizzbunker.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://jizzbunker.com/es' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular1")) - itemlist.append( Item(channel=item.channel, title="Tendencia" , action="peliculas", url=host + "/trending")) - 
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search?query=%s/" % texto - try: - return peliculas(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li><figure>.*?<a href="([^"]+)".*?' - patron += '<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?' - patron += '<span class="score">(\d+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedurl = scrapedurl.replace("channel", "channel30") - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li><figure>.*?<a href="([^"]+)/([^"]+).html".*?' - patron += '<img class="lazy" data-original="([^"]+)".*?' 
- patron += '<time datetime=".*?">([^"]+)</time>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches: - url = scrapedurl + "/" + scrapedtitle + ".html" - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - plot=plot, contentTitle = contentTitle)) - next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">→</a>') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = 'type:\'video/mp4\',src:\'([^\']+)\'' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - scrapedurl = scrapedurl.replace("https", "http") - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/justporn.json b/channels/porn/justporn.json deleted file mode 100644 index e0b656a5..00000000 --- a/channels/porn/justporn.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "justporn", - "name": "justporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://xxx.justporno.tv/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/justporn.py b/channels/porn/justporn.py deleted file mode 100644 index 50aa533f..00000000 --- a/channels/porn/justporn.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- 
-#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://xxx.justporno.tv' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/latest-updates/1/")) - itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar" , action="search")) - return itemlist - - -def search(item, texto): - logger.info() - item.url = "%s/search/%s/" % (host, texto.replace("+", "-")) - item.extra = texto - try: - return lista(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?' 
- patron += '<div class="videos">(\d+) video.*?</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,numero in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle + " (" + numero + ")" - thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - - return sorted(itemlist, key=lambda i: i.title) - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" title="([^"]+)" >.*?' - patron += 'data-original="([^"]+)".*?' - patron += '<div class="duration">(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - scrapedplot = "" - scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle - scrapedurl = "http://xxx.justporno.tv/embed/" + scrapedurl - itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - - if item.extra: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(.*?)>') - if next_page: - if "from_videos=" in item.url: - next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url) - else: - next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result"\ - "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - else: - next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"') - if next_page and not next_page.startswith("#"): - next_page = urlparse.urljoin(host, 
next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - else: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') - if next_page: - if "from" in item.url: - next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) - else: - next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list" \ - "&sort_by=post_date&from=%s" % (item.url, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = 'video_url: \'([^\']+)\'' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl in matches: - scrapedplot = "" - itemlist.append(item.clone(channel=item.channel, action="play", title=item.title , url=scrapedurl , plot="" , folder=True) ) - return itemlist - diff --git a/channels/porn/kingsizetits.json b/channels/porn/kingsizetits.json deleted file mode 100755 index 69493b3e..00000000 --- a/channels/porn/kingsizetits.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "kingsizetits", - "name": "Kingsizetits", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://cdn.images.kingsizetits.com/resources/kingsizetits.com/rwd_5/default/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/kingsizetits.py b/channels/porn/kingsizetits.py deleted file mode 100755 index c8cdc8a3..00000000 --- a/channels/porn/kingsizetits.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools 
-from core import httptools - -host = 'http://kingsizetits.com' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/most-recent/")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-viewed-week/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas largos" , action="lista", url=host + "/longest/")) - - - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/videos/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<a href="([^"]+)" class="video-box.*?' - patron += 'src=\'([^\']+)\' alt=\'([^\']+)\'.*?' - patron += 'data-video-count="(\d+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<script>stat.*?' - patron += '<a href="([^"]+)".*?' 
- patron += 'src="([^"]+)".*?' - patron += '<span class="video-length">([^<]+)</span>.*?' - patron += '<span class="pic-name">([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtime,scrapedtitle in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, '<a class="btn default-btn page-next page-nav" href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - logger.debug(data) - url = scrapertools.find_single_match(data,'label:"\d+", file\:"([^"]+)"') - itemlist.append(item.clone(action="play", server="directo", url=url )) - return itemlist - - diff --git a/channels/porn/mangovideo.json b/channels/porn/mangovideo.json deleted file mode 100755 index af5b8c23..00000000 --- a/channels/porn/mangovideo.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "mangovideo", - "name": "mangovideo", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://mangovideo.pw/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/mangovideo.py b/channels/porn/mangovideo.py deleted file mode 100755 index 509d2f93..00000000 --- a/channels/porn/mangovideo.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import 
urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - - -server = {'1': 'https://www.mangovideo.pw/contents/videos', '7' : 'https://server9.mangovideo.pw/contents/videos/', - '8' : 'https://s10.mangovideo.pw/contents/videos/', '9' : 'https://server2.mangovideo.pw/contents/videos/', - '10' : 'https://server217.mangovideo.pw/contents/videos/', '11' : 'https://234.mangovideo.pw/contents/videos/' - } - -host = 'http://mangovideo.pw' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Sitios" , action="categorias", url=host + "/sites/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?' 
- patron += '<div class="videos">(\d+) videos</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedthumbnail = "" - title = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="categorias", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - - - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<div class="item\s+">.*?' - patron += '<a href="([^"]+)" title="([^"]+)".*?' - patron += 'data-original="([^"]+)".*?' - patron += '<div class="duration">([^<]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - scrapedtitle = "" - patron = 'video_url: \'function/0/https://mangovideo.pw/get_file/(\d+)/\w+/(.*?)/\'' - 
matches = scrapertools.find_multiple_matches(data, patron) - for scrapedtitle,url in matches: - scrapedtitle = server.get(scrapedtitle, scrapedtitle) - url = scrapedtitle + url - if not scrapedtitle: - url = scrapertools.find_single_match(data, '<div class="embed-wrap".*?<iframe src="([^"]+)\?ref=') - itemlist.append(item.clone(action="play", title="%s", url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - - return itemlist - diff --git a/channels/porn/mporno.json b/channels/porn/mporno.json deleted file mode 100644 index 76b4cf2c..00000000 --- a/channels/porn/mporno.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "mporno", - "name": "mporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://mporno.tv/templates/fluidporn/img/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/mporno.py b/channels/porn/mporno.py deleted file mode 100644 index e568594d..00000000 --- a/channels/porn/mporno.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://mporno.tv' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/most-recent/")) - itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="lista", url=host + "/most-viewed/")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", 
url=host + "/channels/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/videos/%s/page1.html" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle + " " + cantidad - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<img class="content_image" src="([^"]+).mp4/.*?" 
alt="([^"]+)".*?this.src="(.*?)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - contentTitle = scrapedtitle - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, server= "directo", contentTitle=contentTitle)) - next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' class="next">Next >></a>') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - - return itemlist - - -def play(item): - logger.info() - itemlist = [] - url = item.url.replace("/thumbs/", "/videos/") + ".mp4" - itemlist.append( Item(channel=item.channel, action="play", title= item.title, server= "directo", url=url)) - return itemlist \ No newline at end of file diff --git a/channels/porn/muchoporno.json b/channels/porn/muchoporno.json deleted file mode 100644 index a4c4ea81..00000000 --- a/channels/porn/muchoporno.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "muchoporno", - "name": "muchoporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.muchoporno.xxx/assets/css/logo/images/sprite-muchoporno.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/muchoporno.py b/channels/porn/muchoporno.py deleted file mode 100644 index 73130718..00000000 --- a/channels/porn/muchoporno.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 
'https://www.pornburst.xxx' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/page3.html")) - itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/pornstars/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/sites/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if "/sites/" in item.url: - patron = '<div class="muestra-escena muestra-canales">.*?' - patron += 'href="([^"]+)">.*?' - patron += 'data-src="([^"]+)".*?' - patron += '<a title="([^"]+)".*?' - patron += '</span> (\d+) videos</span>' - if "/pornstars/" in item.url: - patron = '<a class="muestra-escena muestra-pornostar" href="([^"]+)">.*?' - patron += 'data-src="([^"]+)".*?' - patron += 'alt="([^"]+)".*?' - patron += '</span> (\d+) videos</span>' - else: - patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="[^"]+">.*?' - patron += 'data-src="([^"]+)".*?' 
- patron += '</span> ([^"]+) </h2>(.*?)>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + cantidad + ' / ' + scrapedtitle) - scrapedplot = "" - cantidad = " (" + cantidad + ")" - if "</a" in cantidad: - cantidad = "" - scrapedtitle = scrapedtitle + cantidad - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="muestra-escena"\s*href="([^"]+)".*?' - patron += 'data-stats-video-name="([^"]+)".*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<span class="ico-minutos sprite" title="Length"></span>([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - year = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source src="([^"]+)" type="video/mp4"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - title = scrapedurl - itemlist.append(item.clone(action="play", title=title, url=scrapedurl)) - return itemlist - diff --git a/channels/porn/nuvid.json b/channels/porn/nuvid.json deleted file mode 100644 index aee728e6..00000000 --- a/channels/porn/nuvid.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "nuvid", - "name": "Nuvid", - "language": ["*"], - "active": true, - "adult": true, - "thumbnail": "http://i.imgur.com/rSbuStX.png", - "banner": "nuvid.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git a/channels/porn/nuvid.py b/channels/porn/nuvid.py deleted file mode 100644 index d87be509..00000000 --- a/channels/porn/nuvid.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- - -import base64 -import hashlib -import urlparse - -from core import httptools -from core import scrapertools -from platformcode import logger - -host = "https://www.nuvid.com" - - -def 
mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( - item.clone(action="lista", title="Nuevos Vídeos", url="https://www.nuvid.com/search/videos/_empty_/")) - itemlist.append( - item.clone(action="lista", title="Mejor Valorados", url="https://www.nuvid.com/search/videos/_empty_/", - extra="rt")) - itemlist.append( - item.clone(action="lista", title="Solo HD", url="https://www.nuvid.com/search/videos/hd", calidad="1")) - itemlist.append(item.clone(action="categorias", title="Categorías", url=host)) - itemlist.append(item.clone(title="Buscar...", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - item.url = "https://www.nuvid.com/search/videos/" + texto.replace(" ", "%20") - item.extra = "buscar" - return lista(item) - - -def lista(item): - logger.info() - itemlist = [] - - if not item.calidad: - item.calidad = "0" - filter = 'ch=178.1.2.3.4.191.7.8.5.9.10.169.11.12.13.14.15.16.17.18.28.190.20.21.22.27.23.24.25.26.189.30.31.32.181' \ - '.35.36.37.180.176.38.33.34.39.40.41.42.177.44.43.45.47.48.46.49.50.51.52.53.54.55.56.57.58.179.59.60.61.' 
\ - '62.63.64.65.66.69.68.71.67.70.72.73.74.75.182.183.77.76.78.79.80.81.82.84.85.88.86.188.87.91.90.92.93.94' \ - '&hq=%s&rate=&dur=&added=&sort=%s' % (item.calidad, item.extra) - header = {'X-Requested-With': 'XMLHttpRequest'} - if item.extra != "buscar": - header['Cookie'] = 'area=EU; lang=en; search_filter_new=%s' % filter - # Descarga la pagina - data = httptools.downloadpage(item.url, headers=header, cookies=False).data - - # Extrae las entradas - patron = '<div class="box-tumb related_vid.*?href="([^"]+)" title="([^"]+)".*?src="([^"]+)"(.*?)<i class="time">([^<]+)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches: - scrapedurl = urlparse.urljoin(host, scrapedurl) - if duration: - scrapedtitle = "%s - %s" % (duration, scrapedtitle) - if item.calidad == "0" and 'class="hd"' in quality: - scrapedtitle += " [COLOR red][HD][/COLOR]" - itemlist.append( - item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, folder=False)) - - # Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, '<li class="next1">.*?href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(host, next_page) - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - - # Descarga la pagina - data = httptools.downloadpage("https://www.nuvid.com/categories").data - - # Extrae las entradas (carpetas) - bloques = scrapertools.find_multiple_matches(data, '<h2 class="c-mt-output title2">.*?>([^<]+)</h2>(.*?)</div>') - for cat, b in bloques: - cat = cat.replace("Straight", "Hetero") - itemlist.append(item.clone(action="", title=cat, text_color="gold")) - matches = scrapertools.find_multiple_matches(b, '<li>.*?href="([^"]+)" >(.*?)</span>') - for scrapedurl, scrapedtitle in matches: - scrapedtitle = " " + 
scrapedtitle.replace("<span>", "") - scrapedurl = urlparse.urljoin(host, scrapedurl) - itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl)) - - return itemlist - - -def play(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url, cookies=False).data - h = scrapertools.find_single_match(data, "params\s*\+=\s*'h=([^']+)'") - t = scrapertools.find_single_match(data, "params\s*\+=\s*'%26t=([^']+)'") - vkey = scrapertools.find_single_match(data, "params\s*\+=\s*'%26vkey='.*?'([^']+)'") - pkey = hashlib.md5(vkey + base64.b64decode("aHlyMTRUaTFBYVB0OHhS")).hexdigest() - - url = 'https://www.nuvid.com/player_config/?h=%s&check_speed=1&t=%s&vkey=%s&pkey=%s&aid=&domain_id=' % ( - h, t, vkey, pkey) - data = httptools.downloadpage(url, cookies=False).data - videourl = scrapertools.find_single_match(data, '<video_file>.*?(http.*?)\]') - if videourl: - itemlist.append(['.mp4 [directo]', videourl]) - videourl = scrapertools.find_single_match(data, '<hq_video_file>.*?(http.*?)\]') - if videourl: - itemlist.append(['.mp4 HD [directo]', videourl]) - - return itemlist diff --git a/channels/porn/pandamovie.json b/channels/porn/pandamovie.json deleted file mode 100644 index ce411a6f..00000000 --- a/channels/porn/pandamovie.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "pandamovie", - "name": "pandamovie", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.pandamovies.com/templates/pandamovies/images/logo.png?v1482157699", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/pandamovie.py b/channels/porn/pandamovie.py deleted file mode 100644 index e7961206..00000000 --- a/channels/porn/pandamovie.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -import urlparse -import re -import base64 - -from platformcode import config, logger -from core import scrapertools -from core import 
servertools -from core.item import Item -from core import httptools - -host = 'https://pandamovies.pw' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/movies")) - itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/movies")) - itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "/movies")) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - if item.title == "Categorias": - data = scrapertools.find_single_match(data, '<a href="#">Genres</a>(.*?)</ul>') - else: - data = scrapertools.find_single_match(data, '<a href="#">Studios</a>(.*?)</ul>') - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)">([^<]+)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = scrapedurl.replace("https:", "") - scrapedurl = "https:" + scrapedurl - itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div data-movie-id="\d+".*?' - patron += '<a href="([^"]+)".*?oldtitle="([^"]+)".*?' - patron += '<img data-original="([^"]+)".*?' 
- matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: - url = urlparse.urljoin(item.url, scrapedurl) - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=title)) - next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>') - if next_page == "": - next_page = scrapertools.find_single_match(data, '<a.*?href="([^"]+)" >Next »</a>') - if next_page != "": - next_page = urlparse.urljoin(item.url, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = '- on ([^"]+)" href="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedtitle,url in matches: - if 'aHR0' in url: - n = 3 - while n > 0: - url= url.replace("https://vshares.tk/goto/", "").replace("https://waaws.tk/goto/", "").replace("https://openloads.tk/goto/", "") - logger.debug (url) - url = base64.b64decode(url) - n -= 1 - if "mangovideo" in url: #Aparece como directo - data = httptools.downloadpage(url).data - patron = 'video_url: \'function/0/https://mangovideo.pw/get_file/(\d+)/\w+/(.*?)/\?embed=true\'' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedtitle,url in matches: - if scrapedtitle =="1": scrapedtitle= "https://www.mangovideo.pw/contents/videos/" - if scrapedtitle =="7": scrapedtitle= "https://server9.mangovideo.pw/contents/videos/" - if scrapedtitle =="8": scrapedtitle= "https://s10.mangovideo.pw/contents/videos/" - if scrapedtitle =="10": scrapedtitle= "https://server217.mangovideo.pw/contents/videos/" - if scrapedtitle =="11": scrapedtitle= 
"https://234.mangovideo.pw/contents/videos/" - url = scrapedtitle + url - itemlist.append( Item(channel=item.channel, action="play", title = "%s", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/peliculaseroticas.json b/channels/porn/peliculaseroticas.json deleted file mode 100644 index 51e679b4..00000000 --- a/channels/porn/peliculaseroticas.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "peliculaseroticas", - "name": "PeliculasEroticas", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "peliculaseroticas.png", - "banner": "peliculaseroticas.png", - "categories": [ - "adult" - ] -} \ No newline at end of file diff --git a/channels/porn/peliculaseroticas.py b/channels/porn/peliculaseroticas.py deleted file mode 100644 index 300cb786..00000000 --- a/channels/porn/peliculaseroticas.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - - -def mainlist(item): - logger.info() - itemlist = [] - if item.url == "": - item.url = "http://www.peliculaseroticas.net/" - data = httptools.downloadpage(item.url).data - patron = '<div class="post">.*?' - patron += '<a href="([^"]+)">([^<]+)</a>.*?' 
- patron += '<img src="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: - url = urlparse.urljoin(item.url, scrapedurl) - title = scrapedtitle.strip() - thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, - fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie")) - # Extrae la marca de siguiente página - if item.url == "http://www.peliculaseroticas.net/": - next_page_url = "http://www.peliculaseroticas.net/cine-erotico/2.html" - else: - current_page = scrapertools.find_single_match(item.url, "(\d+)") - next_page = int(current_page) + 1 - next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html" - itemlist.append( Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url)) - return itemlist - diff --git a/channels/porn/pelisxporno.json b/channels/porn/pelisxporno.json deleted file mode 100644 index e01fb74e..00000000 --- a/channels/porn/pelisxporno.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "pelisxporno", - "name": "Pelisxporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://i.imgur.com/ywMHwat.png", - "banner": "pelisxporno.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} \ No newline at end of file diff --git a/channels/porn/pelisxporno.py b/channels/porn/pelisxporno.py deleted file mode 100644 index f8920727..00000000 --- a/channels/porn/pelisxporno.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -import urlparse -import re - -from platformcode import config, logger -from core import httptools -from core import scrapertools -from core import servertools - - -host = 'http://www.pelisxporno.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(item.clone(action="lista", title="Novedades", url= host + 
"/?order=date")) - itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categorias/")) - itemlist.append(item.clone(action="search", title="Buscar")) - return itemlist - -def search(item, texto): - logger.info("") - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - -def search(item, texto): - logger.info() - item.url = item.url % texto - return lista(item) - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="cat-item cat-item-.*?"><a href="(.*?)".*?>(.*?)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle in matches: - itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="video.".*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)".*?\/>.*?duration.*?>(.*?)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle, scrapedthumbnail, duration in matches: - if duration: - scrapedtitle = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail)) - next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"') - if next_page: - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, '<div class="video_code">(.*?)<h3') - patron = '(?:src|SRC)="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for 
scrapedurl in matches: - if not 'mixdrop' in scrapedurl: #el base64 es netu.tv - url = "https://hqq.tv/player/embed_player.php?vid=RODE5Z2Hx3hO&autoplay=none" - else: - url = "https:" + scrapedurl - headers = {'Referer': item.url} - data = httptools.downloadpage(url, headers=headers).data - url = scrapertools.find_single_match(data, 'vsrc = "([^"]+)"') - url= "https:" + url - itemlist.append(item.clone(action="play", title = "%s", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/perfectgirls.json b/channels/porn/perfectgirls.json deleted file mode 100644 index 0ef5b75b..00000000 --- a/channels/porn/perfectgirls.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "perfectgirls", - "name": "perfectgirls", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.perfectgirls.net/images/no-sprite/perfect-girl-regular.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/perfectgirls.py b/channels/porn/perfectgirls.py deleted file mode 100644 index 43503f3c..00000000 --- a/channels/porn/perfectgirls.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.perfectgirls.net' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host)) - itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/top/3days/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", 
action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return peliculas(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - url = urlparse.urljoin(item.url,scrapedurl) + "/1" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="list__item_link"><a href="([^"]+)" title="([^"]+)">.*?' - patron += 'data-original="([^"]+)".*?' 
- patron += '<time>(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches: - plot = "" - time = scrapertools.find_single_match(duracion, '([^"]+)</time>') - if not 'HD' in duracion : - title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle - else: - title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - scrapedthumbnail = "http:" + scrapedthumbnail - url = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail, plot=plot, contentTitle = title)) - next_page = scrapertools.find_single_match(data, '<a class="btn_wrapper__btn" href="([^"]+)">Next</a></li>') - if next_page: - next_page = urlparse.urljoin(item.url, next_page) - itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page )) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source src="([^"]+)" res="\d+" label="([^"]+)" type="video/mp4" default/>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl)) - return itemlist - diff --git a/channels/porn/playpornx.json b/channels/porn/playpornx.json deleted file mode 100644 index d7582a98..00000000 --- a/channels/porn/playpornx.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "playpornx", - "name": "PlayPornX", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://s3.postimg.cc/3rkfrnypv/playpornx.png", - "banner": "https://s3.postimg.cc/it5qa1gyb/playpornx-banner.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} \ No newline at end of file diff --git a/channels/porn/playpornx.py 
b/channels/porn/playpornx.py deleted file mode 100644 index c39822b1..00000000 --- a/channels/porn/playpornx.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import servertools -from core import scrapertools -from core.item import Item -from platformcode import logger -import base64 - -host = "https://watchfreexxx.net/" - -def mainlist(item): - itemlist = [] - - itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", - url = urlparse.urljoin(host, "category/porn-movies/"))) - - itemlist.append(Item(channel=item.channel, title="Escenas", action="lista", - url = urlparse.urljoin(host, "category/xxx-scenes/"))) - - itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=', - thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png', - fanart='https://s30.postimg.cc/pei7txpa9/buscar.png')) - - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = item.url + texto - try: - if texto != '': - item.extra = 'Buscar' - return lista(item) - else: - return [] - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def lista(item): - logger.info() - itemlist = [] - if item.url == '': item.url = host - data = httptools.downloadpage(item.url).data - data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data) - patron = '<article id=.*?<a href="([^"]+)".*?<img data-src="([^"]+)" alt="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - for data_1, data_2, data_3 in matches: - url = data_1 - thumbnail = data_2 - title = data_3 - itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail)) - #Paginacion - if itemlist != []: - actual_page_url = item.url - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Next</a>') - if next_page != '': - itemlist.append(Item(channel=item.channel, 
action="lista", title='Siguiente >>>', url=next_page, - thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', extra=item.extra)) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = '- on ([^"]+)" href="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedtitle,url in matches: - if "tk/goto/" in url: - n = 3 - while n > 0: - url= url.replace("https://vshares.tk/goto/", "").replace("https://waaws.tk/goto/", "").replace("https://openloads.tk/goto/", "") - logger.debug (url) - url = base64.b64decode(url) - n -= 1 - if "mangovideo" in url: #Aparece como directo - data = httptools.downloadpage(url).data - patron = 'video_url: \'function/0/https://mangovideo.pw/get_file/(\d+)/\w+/(.*?)/\?embed=true\'' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedtitle,url in matches: - if scrapedtitle =="1": scrapedtitle= "https://www.mangovideo.pw/contents/videos/" - if scrapedtitle =="7": scrapedtitle= "https://server9.mangovideo.pw/contents/videos/" - if scrapedtitle =="8": scrapedtitle= "https://s10.mangovideo.pw/contents/videos/" - if scrapedtitle =="10": scrapedtitle= "https://server217.mangovideo.pw/contents/videos/" - if scrapedtitle =="11": scrapedtitle= "https://234.mangovideo.pw/contents/videos/" - url = scrapedtitle + url - itemlist.append(item.clone(action="play", title = "%s", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - - diff --git a/channels/porn/porn300.json b/channels/porn/porn300.json deleted file mode 100644 index 7af7d643..00000000 --- a/channels/porn/porn300.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "porn300", - "name": "porn300", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.porn300.com/android-icon-192x192.png", - "banner": "", - "categories": [ - "adult" - 
], - "settings": [ - - ] -} - diff --git a/channels/porn/porn300.py b/channels/porn/porn300.py deleted file mode 100644 index c84840a0..00000000 --- a/channels/porn/porn300.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.porn300.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/en_US/ajax/page/list_videos/?page=1")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/?page=1")) - itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/pornstars/?page=1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/?page=1")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist -# view-source:https://www.porn300.com/en_US/ajax/page/show_search?q=big+tit&page=1 -# https://www.porn300.com/en_US/ajax/page/show_search?page=2 -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/en_US/ajax/page/show_search?q=%s&?page=1" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a itemprop="url" href="/([^"]+)".*?' - patron += 'data-src="([^"]+)" alt=.*?' - patron += 'itemprop="name">([^<]+)</h3>.*?' 
- patron += '</svg>([^<]+)<' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - cantidad = re.compile("\s+", re.DOTALL).sub(" ", cantidad) - scrapedtitle = scrapedtitle + " (" + cantidad +")" - scrapedurl = scrapedurl.replace("channel/", "producer/") - scrapedurl = "/en_US/ajax/page/show_" + scrapedurl + "?page=1" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />') - if next_page=="": - if "/?page=1" in item.url: - next_page=urlparse.urljoin(item.url,"/?page=2") - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a itemprop="url" href="([^"]+)".*?' - patron += 'data-src="([^"]+)".*?' - patron += 'itemprop="name">([^<]+)<.*?' 
- patron += '</svg>([^<]+)<' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - scrapedtime = scrapedtime.strip() - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle) ) - prev_page = scrapertools.find_single_match(item.url,"(.*?)page=\d+") - num= int(scrapertools.find_single_match(item.url,".*?page=(\d+)")) - num += 1 - num_page = "?page=" + str(num) - if num_page!="": - next_page = urlparse.urljoin(item.url,num_page) - if "show_search" in next_page: - next_page = prev_page + num_page - next_page = next_page.replace("&?", "&") - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for url in matches: - itemlist.append(item.clone(action="play", title=url, url=url)) - return itemlist - diff --git a/channels/porn/pornboss.json b/channels/porn/pornboss.json deleted file mode 100644 index c9815e6e..00000000 --- a/channels/porn/pornboss.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "pornboss", - "name": "pornboss", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://pornboss.org/wp-content/uploads/2018/10/cropped-bottom-180x180.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/pornboss.py b/channels/porn/pornboss.py deleted file mode 100644 index ebe0f22b..00000000 --- a/channels/porn/pornboss.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- 
-#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import jsontools as json -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://pornboss.org' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/category/movies/")) - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - data = scrapertools.find_single_match(data,'<div class="uk-panel uk-panel-box widget_nav_menu">(.*?)</ul>') - patron = '<li><a href=(.*?) class>([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<article id=item-\d+.*?' - patron += '<img class=.*?src=(.*?) alt="([^"]+)".*?' - patron += 'Duration:</strong>(.*?) / <strong>.*?' 
- patron += '>SHOW<.*?href=([^"]+) target=' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedtitle,duration,scrapedurl in matches: - scrapedplot = "" - title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li><a href=([^<]+)><i class=uk-icon-angle-double-right>') - next_page = next_page.replace('"', '') - if next_page!="": - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - -def play(item): - logger.info() - itemlist = [] - if "streamcloud" in item.url: - itemlist.append(item.clone(action="play", url=item.url )) - else: - data = httptools.downloadpage(item.url).data - url=scrapertools.find_single_match(data,'<span class="bottext">Streamcloud.eu</span>.*?href="([^"]+)"') - url= "https://tolink.to" + url - data = httptools.downloadpage(url).data - patron = '<input type="hidden" name="id" value="([^"]+)">.*?' 
- patron += '<input type="hidden" name="fname" value="([^"]+)">' - matches = re.compile(patron,re.DOTALL).findall(data) - for id, url in matches: - url= "http://streamcloud.eu/" + id - itemlist.append(item.clone(action="play", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist) - return itemlist - - diff --git a/channels/porn/porndish.json b/channels/porn/porndish.json deleted file mode 100755 index c6089759..00000000 --- a/channels/porn/porndish.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "porndish", - "name": "porndish", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.porndish.com/wp-content/uploads/2015/09/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/porndish.py b/channels/porn/porndish.py deleted file mode 100755 index 0a5508aa..00000000 --- a/channels/porn/porndish.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.porndish.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", 
data) - patron = '<li id="menu-item-\d+".*?' - patron += '<a href="([^"]+)">([^<]+)<' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - data = scrapertools.find_single_match(data, 'archive-body">(.*?)<div class="g1-row g1-row-layout-page g1-prefooter">') - patron = '<article class=.*?' - patron += 'src="([^"]+)".*?' - patron += 'title="([^"]+)".*?' - patron += '<a href="([^"]+)" rel="bookmark">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedtitle,scrapedurl in matches: - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, '<a class="g1-delta g1-delta-1st next" href="([^"]+)">Next</a>') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - - - diff --git a/channels/porn/porneq.json b/channels/porn/porneq.json deleted file mode 100644 index 4416272a..00000000 --- a/channels/porn/porneq.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "porneq", - "name": "porneq", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://porneq.com/uploads/porneq-logo-home-png554cf1a970e6d.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] 
-} - diff --git a/channels/porn/porneq.py b/channels/porn/porneq.py deleted file mode 100644 index 044346eb..00000000 --- a/channels/porn/porneq.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -import urlparse -import urllib2 -import urllib -import re -import os -import sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://porneq.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/videos/browse/")) - itemlist.append(Item(channel=item.channel, title="Mas Vistos", action="lista", url=host + "/videos/most-viewed/")) - itemlist.append(Item(channel=item.channel, title="Mas Votado", action="lista", url=host + "/videos/most-liked/")) - itemlist.append(Item(channel=item.channel, title="Big Tits", action="lista", url=host + "/show/big+tit")) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/show/%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<span class="timer">(.*?)</span></div>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedtime in matches: - scrapedplot = "" - scrapedthumbnail = scrapedthumbnail.replace("https:", "http:") - scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle - itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot)) - next_page = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"') - if next_page != "": - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data, '<source src="([^"]+)"') - scrapedurl = scrapedurl.replace("X20", "-") - itemlist.append( - Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist diff --git a/channels/porn/pornhive.json b/channels/porn/pornhive.json deleted file mode 100644 index f884f1f0..00000000 --- a/channels/porn/pornhive.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "pornhive", - "name": "pornhive", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.pornhive.tv/assets/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/pornhive.py b/channels/porn/pornhive.py deleted file mode 100644 index f34ce0f6..00000000 --- a/channels/porn/pornhive.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -import base64 - -from core import scrapertools -from core import 
servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://www.pornhive.tv/en' - -# Algunos link caidos - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search?keyword=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if item.title == "Categorias" : - data = scrapertools.find_single_match(data,'Categories(.*?)Channels') - else: - data = scrapertools.find_single_match(data,'Channels(.*?)</ul>') - patron = '<li><a href="([^"]+)" title="[^"]+">(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="col-lg-3 col-md-3 col-sm-4 col-xs-6 col-thumb panel-video-\d+">.*?' - patron += '<a href="([^"]+)".*?' - patron += 'data-src="([^"]+)".*?' 
- patron += 'alt="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos" , title=title, url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=title)) - next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" data-ci-pagination-page="\d+" rel="next">Next ›') - if next_page != "" : - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = ';extra_urls\[\d+\]=\'([^\']+)\'' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl in matches: - scrapedurl = base64.b64decode(scrapedurl) - itemlist.append(item.clone(action="play", title="%s", url=scrapedurl)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - - diff --git a/channels/porn/pornhub.json b/channels/porn/pornhub.json deleted file mode 100644 index 4a719116..00000000 --- a/channels/porn/pornhub.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "pornhub", - "name": "PornHub", - "active": true, - "adult": true, - "language": ["*"], - "fanart": "http://i.imgur.com/PwFvoss.jpg", - "thumbnail": "http://s22.postimg.cc/5lzcocfqp/pornhub_logo.jpg", - "banner": "pornhub.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/pornhub.py b/channels/porn/pornhub.py deleted file mode 100644 index c485be51..00000000 --- a/channels/porn/pornhub.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse -from core import httptools -from core import servertools -from core import scrapertools -from core.item import Item 
-from platformcode import logger - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", fanart=item.fanart, - url="http://es.pornhub.com/video?o=cm")) - itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", fanart=item.fanart, - url="http://es.pornhub.com/categories")) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", fanart=item.fanart, - url="http://es.pornhub.com/video/search?search=%s&o=mr")) - return itemlist - - -def search(item, texto): - logger.info() - - item.url = item.url % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="cat_pic" data-category=".*?' - patron += '<a href="([^"]+)".*?' - patron += 'data-thumb_url="(.*?)".*?' - patron += 'alt="([^"]+)".*?' - patron += '<var>(.*?)</var>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches: - if "?" in scrapedurl: - url = urlparse.urljoin(item.url, scrapedurl + "&o=cm") - else: - url = urlparse.urljoin(item.url, scrapedurl + "?o=cm") - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=url, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail)) - itemlist.sort(key=lambda x: x.title) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - videodata = scrapertools.find_single_match(data, 'videos search-video-thumbs">(.*?)<div class="reset"></div>') - patron = '<div class="phimage">.*?' - patron += '<a href="([^"]+)" title="([^"]+).*?' - patron += 'data-mediumthumb="([^"]+)".*?' 
- patron += '<var class="duration">([^<]+)</var>(.*?)</div>' - matches = re.compile(patron, re.DOTALL).findall(videodata) - for url, scrapedtitle, thumbnail, duration, scrapedhd in matches: - scrapedhd = scrapertools.find_single_match(scrapedhd, '<span class="hd-thumbnail">(.*?)</span>') - if scrapedhd == 'HD': - title = "[COLOR yellow]" +duration+ "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle - else: - title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - url = urlparse.urljoin(item.url, url) - itemlist.append( - Item(channel=item.channel, action="play", title=title, url=url, fanart=thumbnail, thumbnail=thumbnail)) - if itemlist: - # Paginador - patron = '<li class="page_next"><a href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - if matches: - url = urlparse.urljoin(item.url, matches[0].replace('&', '&')) - itemlist.append( - Item(channel=item.channel, action="lista", title=">> Página siguiente", fanart=item.fanart, - url=url)) - return itemlist - -def play(item): - logger.info(item) - itemlist = servertools.find_video_items(item.clone(url = item.url)) - return itemlist - diff --git a/channels/porn/pornohdmega.json b/channels/porn/pornohdmega.json deleted file mode 100755 index c8cc0622..00000000 --- a/channels/porn/pornohdmega.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "pornohdmega", - "name": "pornohdmega", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.pornohdmega.com/wp-content/uploads/2018/11/dftyu.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/pornohdmega.py b/channels/porn/pornohdmega.py deleted file mode 100755 index 862a37f1..00000000 --- a/channels/porn/pornohdmega.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import 
scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.pornohdmega.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?order=recent")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/?order=top-rated")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/?order=most-viewed")) - - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<li><a href=\'([^\']+)\' title=\'([^\']+) Tag\'>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - if not "tag" in scrapedurl: - scrapedurl = "" - thumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=thumbnail , plot=scrapedplot) ) - return itemlist - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<h2><a href="([^"]+)">([^<]+)</a></h2>.*?' 
- patron += '<strong>(\d+) Videos</strong>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = "%s (%s)" % (scrapedtitle,cantidad) - thumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=thumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<figure class="video-preview"><a href="([^"]+)".*?' - patron += '<img src="([^"]+)".*?' - patron += 'title="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot,)) - next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<iframe src="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - itemlist.append(item.clone(action="play", title= "%s", url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/pornrewind.json b/channels/porn/pornrewind.json deleted file mode 100644 index 8e2ed488..00000000 --- a/channels/porn/pornrewind.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "pornrewind", - 
"name": "pornrewind", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.pornrewind.com/static/images/logo-light-pink.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/pornrewind.py b/channels/porn/pornrewind.py deleted file mode 100644 index 55d415e8..00000000 --- a/channels/porn/pornrewind.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.pornrewind.com' - -# hacer funcionar conector Kt player - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/videos/?sort_by=post_date")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/videos/?sort_by=rating")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/videos/?sort_by=video_viewed")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a class="thumb-categories" href="([^"]+)" title="([^"]+)">.*?' 
- patron += '<img class="lazyload" data-src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a class="thumb" href="([^"]+)" title="([^"]+)".*?' - patron += '<img class="lazyload" data-src="([^"]+)".*?' - patron += '<span>(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = title)) - next_page = scrapertools.find_single_match(data, '<li class="direction"><a href="([^"]+)" data-ajax="pagination">') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page ) ) - return itemlist - diff --git a/channels/porn/porntrex.json b/channels/porn/porntrex.json deleted file mode 100644 index 1976283d..00000000 --- a/channels/porn/porntrex.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "id": "porntrex", - "name": "Porntrex", - "language": ["*"], - "active": true, - "adult": true, - "thumbnail": "http://i.imgur.com/n8SUCE9.png?1", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "menu_info", - "type": "bool", - "label": "Mostrar menú antes de reproducir con imágenes", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No 
newline at end of file diff --git a/channels/porn/porntrex.py b/channels/porn/porntrex.py deleted file mode 100644 index 70dbca6f..00000000 --- a/channels/porn/porntrex.py +++ /dev/null @@ -1,327 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urllib -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import config, logger - -host = "https://www.porntrex.com" -perpage = 20 - - -def mainlist(item): - logger.info() - itemlist = [] - - config.set_setting("url_error", False, "porntrex") - itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/")) - itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/")) - itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/")) - itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/")) - itemlist.append(item.clone(action="categorias", title="Modelos", - url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \ - "_list&sort_by=total_videos")) - itemlist.append(item.clone(action="categorias", title="Canal", url=host + "/channels/")) - itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/")) - itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/")) - itemlist.append(item.clone(title="Buscar...", action="search")) - itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - - return itemlist - - -def configuracion(item): - from platformcode import platformtools - ret = platformtools.show_channel_settings() - platformtools.itemlist_refresh() - return ret - - -def search(item, texto): - logger.info() - item.url = "%s/search/%s/" % (host, texto.replace("+", "-")) - item.extra = texto - try: - return lista(item) - # Se captura la excepción, para no interrumpir al buscador global si 
un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def lista(item): - logger.info() - itemlist = [] - # Descarga la pagina - data = get_data(item.url) - action = "play" - if config.get_setting("menu_info", "porntrex"): - action = "menu_info" - # Quita las entradas, que no son private <div class="video-preview-screen video-item thumb-item private " - patron = '<div class="video-preview-screen video-item thumb-item ".*?' - patron += '<a href="([^"]+)".*?' - patron += 'data-src="([^"]+)".*?' - patron += 'alt="([^"]+)".*?' - patron += '<span class="quality">(.*?)<.*?' - patron += '</i>([^<]+)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches: - if "go.php?" in scrapedurl: - scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) - if not scrapedthumbnail.startswith("https"): - scrapedthumbnail = "https:%s" % scrapedthumbnail - else: - scrapedurl = urlparse.urljoin(host, scrapedurl) - if not scrapedthumbnail.startswith("https"): - scrapedthumbnail = "https:%s" % scrapedthumbnail - scrapedtitle = "%s - [COLOR red]%s[/COLOR] %s" % (duration, quality, scrapedtitle) - scrapedthumbnail += "|Referer=https://www.porntrex.com/" - itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - contentThumbnail=scrapedthumbnail, fanart=scrapedthumbnail)) - # Extrae la marca de siguiente página - if item.extra: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)') - if next_page: - if "from_videos=" in item.url: - next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url) - else: - next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos" \ - "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page) - itemlist.append(item.clone(action="lista", 
title=">> Página Siguiente", url=next_page)) - else: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"') - if next_page and not next_page.startswith("#"): - if "go.php?" in next_page: - next_page = urllib.unquote(next_page.split("/go.php?u=")[1].split("&")[0]) - else: - next_page = urlparse.urljoin(host, next_page) - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - else: - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from4:(\d+)') - if next_page: - if "from4" in item.url: - next_page = re.sub(r'&from4=(\d+)', '&from4=%s' % next_page, item.url) - else: - next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list_norm" \ - "&sort_by=post_date&from4=%s" % ( - item.url, next_page) - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = get_data(item.url) - - # Extrae las entradas - if "/channels/" in item.url: - patron = '<div class="video-item ">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<li>([^<]+)<' - else: - patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<div class="videos">([^<]+)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches: - if "go.php?" 
in scrapedurl: - scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) - scrapedthumbnail = urllib.unquote(scrapedthumbnail.split("/go.php?u=")[1].split("&")[0]) - scrapedthumbnail += "|Referer=https://www.porntrex.com/" - else: - scrapedurl = urlparse.urljoin(host, scrapedurl) - if not scrapedthumbnail.startswith("https"): - scrapedthumbnail = "https:%s" % scrapedthumbnail - scrapedthumbnail += "|Referer=https://www.porntrex.com/" - scrapedthumbnail = scrapedthumbnail.replace(" " , "%20") - if videos: - scrapedtitle = "%s (%s)" % (scrapedtitle, videos) - itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail)) - - # Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') - if next_page: - if "from=" in item.url: - next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) - else: - next_page = "%s&from=%s" % (item.url, next_page) - itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page)) - - return itemlist - - -def playlists(item): - logger.info() - itemlist = [] - # Descarga la pagina - data = get_data(item.url) - # Extrae las entradas - patron = '<div class="item.*?' - patron += 'href="([^"]+)" title="([^"]+)".*?' - patron += 'data-original="([^"]+)".*?' - patron += '<div class="totalplaylist">([^<]+)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches: - if "go.php?" 
in scrapedurl: - scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) - scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) - else: - scrapedurl = urlparse.urljoin(host, scrapedurl) - if not scrapedthumbnail.startswith("https"): - scrapedthumbnail = "https:%s" % scrapedthumbnail - scrapedthumbnail += "|Referer=https://www.porntrex.com/" - scrapedthumbnail = scrapedthumbnail.replace(" " , "%20") - if videos: - scrapedtitle = "%s [COLOR red](%s)[/COLOR]" % (scrapedtitle, videos) - itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail)) - #Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"') - if next_page: - if "go.php?" in next_page: - next_page = urllib.unquote(next_page.split("/go.php?u=")[1].split("&")[0]) - else: - next_page = urlparse.urljoin(host, next_page) - itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page)) - - return itemlist - - -def videos(item): - logger.info() - if not item.indexp: - item.indexp = 1 - itemlist = [] - # Descarga la pagina - data = get_data(item.url) - action = "play" - if config.get_setting("menu_info", "porntrex"): - action = "menu_info" - # Extrae las entradas - # Quita las entradas, que no son private <div class="video-item private "> - patron = '<div class="video-item ".*?' - patron += 'href="([^"]+)".*?' - patron += 'title="([^"]+)".*?' - patron += 'src="([^"]+)"(.*?)<div class="durations">.*?' - patron += '</i>([^<]+)</div>' - matches = scrapertools.find_multiple_matches(data, patron) - count = 0 - for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches: - count += 1 - if count < item.indexp: - continue - if "go.php?" 
in scrapedurl: - scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) - scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) - else: - scrapedurl = urlparse.urljoin(host, scrapedurl) - if not scrapedthumbnail.startswith("https"): - scrapedthumbnail = "https:%s" % scrapedthumbnail - scrapedthumbnail += "|Referer=https://www.porntrex.com/" - scrapedthumbnail = scrapedthumbnail.replace(" " , "%20") - if 'k4"' in quality: - quality = "4K" - scrapedtitle = "%s - [COLOR yellow]%s[/COLOR] %s" % (duration, quality, scrapedtitle) - else: - quality = scrapertools.find_single_match(quality, '<span class="quality">(.*?)<.*?') - scrapedtitle = "%s - [COLOR red]%s[/COLOR] %s" % (duration, quality, scrapedtitle) - if len(itemlist) >= perpage: - break; - itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, contentThumbnail=scrapedthumbnail, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail)) - #Extrae la marca de siguiente página - if item.channel and len(itemlist) >= perpage: - itemlist.append( item.clone(title = "Página siguiente >>>", indexp = count + 1) ) - return itemlist - -def play(item): - logger.info() - itemlist = [] - data = get_data(item.url) - patron = '(?:video_url|video_alt_url[0-9]*):\s*\'([^\']+)\'.*?' 
- patron += '(?:video_url_text|video_alt_url[0-9]*_text):\s*\'([^\']+)\'' - matches = scrapertools.find_multiple_matches(data, patron) - scrapertools.printMatches(matches) - for url, quality in matches: - quality = quality.replace(" HD" , "").replace(" 4k", "") - itemlist.append(['.mp4 %s [directo]' % quality, url]) - if item.extra == "play_menu": - return itemlist, data - return itemlist - - -def menu_info(item): - logger.info() - itemlist = [] - video_urls, data = play(item.clone(extra="play_menu")) - itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls)) - matches = scrapertools.find_multiple_matches(data, '<img class="thumb lazy-load" src="([^"]+)"') - for i, img in enumerate(matches): - if i == 0: - continue - img = "https:" + img + "|Referer=https://www.porntrex.com/" - title = "Imagen %s" % (str(i)) - itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img)) - return itemlist - - -def tags(item): - logger.info() - itemlist = [] - data = get_data(item.url) - - if item.title == "Tags": - letras = [] - matches = scrapertools.find_multiple_matches(data, '<strong class="title".*?>\s*(.*?)</strong>') - for title in matches: - title = title.strip() - if title not in letras: - letras.append(title) - itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title)) - else: - if not item.length: - item.length = 0 - - bloque = scrapertools.find_single_match(data, - '>%s</strong>(.*?)(?:(?!%s)(?!#)[A-Z#]{1}</strong>|<div class="footer-margin">)' % ( - item.extra, item.extra)) - matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">\s*(.*?)</a>') - for url, title in matches[item.length:item.length + 100]: - if "go.php?" 
in url: - url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) - itemlist.append(Item(channel=item.channel, action="lista", url=url, title=title)) - - if len(itemlist) >= 100: - itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=">> Página siguiente", - length=item.length + 100, extra=item.extra)) - - return itemlist - - -def get_data(url_orig): - try: - if config.get_setting("url_error", "porntrex"): - raise Exception - response = httptools.downloadpage(url_orig) - if not response.data or "urlopen error [Errno 1]" in str(response.code): - raise Exception - except: - config.set_setting("url_error", True, "porntrex") - import random - server_random = ['nl', 'de', 'us'] - server = server_random[random.randint(0, 2)] - url = "https://%s.hideproxy.me/includes/process.php?action=update" % server - post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \ - % (urllib.quote(url_orig), server) - while True: - response = httptools.downloadpage(url, post, follow_redirects=False) - if response.headers.get("location"): - url = response.headers["location"] - post = "" - else: - break - return response.data diff --git a/channels/porn/porntv.json b/channels/porn/porntv.json deleted file mode 100755 index d65c8cb8..00000000 --- a/channels/porn/porntv.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "porntv", - "name": "porntv", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.porntv.com/images/dart/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/porntv.py b/channels/porn/porntv.py deleted file mode 100755 index c2a9a31e..00000000 --- a/channels/porn/porntv.py +++ /dev/null @@ -1,104 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools 
-from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.porntv.com' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/videos/straight/all-recent.html")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/videos/straight/all-view.html")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/videos/straight/all-rate.html")) - itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/videos/straight/all-popular.html")) - itemlist.append( Item(channel=item.channel, title="Mas largos" , action="lista", url=host + "/videos/straight/all-length.html")) - - - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "") - item.url = host + "/videos/straight/%s-recent.html" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - data = scrapertools.find_single_match(data, '<h1>Popular Categories</h1>(.*?)<h1>Community</h1>') - patron = '<h2><a href="([^"]+)">([^<]+)</a>.*?' - patron += 'src="([^"]+)".*?' 
- patron += '<span class="contentquantity">([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle + " " + cantidad - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<div class="item" style="width: 320px">.*?' - patron += '<a href="([^"]+)".*?' - patron += '<img src="([^"]+)".*?' - patron += '>(.*?)<div class="trailer".*?' - patron += 'title="([^"]+)".*?' - patron += 'clock"></use></svg>([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,quality,scrapedtitle,scrapedtime in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - if "flag-hd" in quality: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = scrapedtitle)) - - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - data = scrapertools.find_single_match(data, 'sources: 
\[(.*?)\]') - patron = 'file: "([^"]+)",.*?label: "([^"]+)",' - matches = re.compile(patron,re.DOTALL).findall(data) - for url,quality in matches: - itemlist.append(["%s %s [directo]" % (quality, url), url]) - return itemlist - - diff --git a/channels/porn/qwertty.json b/channels/porn/qwertty.json deleted file mode 100644 index 0d618542..00000000 --- a/channels/porn/qwertty.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "qwertty", - "name": "qwertty", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://qwertty.net/wp-content/uploads/2019/07/favicon.ico", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/qwertty.py b/channels/porn/qwertty.py deleted file mode 100644 index 7daedcbe..00000000 --- a/channels/porn/qwertty.py +++ /dev/null @@ -1,116 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools -from channels import pornhub, xvideos,youporn - -host = 'http://qwertty.net' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Recientes" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/?filter=most-viewed")) - itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/?filter=popular")) - itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/?filter=random")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in 
sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li><a href="([^<]+)">(.*?)</a></li>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = host + scrapedurl - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<article id="post-\d+".*?' - patron += '<a href="([^"]+)" title="([^"]+)">.*?' - patron += '<div class="post-thumbnail(.*?)<span class="views">.*?' - patron += '<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches: - scrapedplot = "" - thumbnail = scrapertools.find_single_match(scrapedthumbnail, 'poster="([^"]+)"') - if thumbnail == "": - thumbnail = scrapertools.find_single_match(scrapedthumbnail, "data-thumbs='(.*?jpg)") - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>') - if next_page=="": - next_page = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href="([^"]+)" class="inactive">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = 
httptools.downloadpage(item.url).data - url1 = scrapertools.find_single_match(data,'<meta itemprop="embedURL" content="([^"]+)"') - if "spankwire" in url1: - data = httptools.downloadpage(item.url).data - data = scrapertools.get_match(data,'Copy Embed Code(.*?)For Desktop') - patron = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - url = scrapedurl - if url=="#": - url = scrapertools.find_single_match(data,'playerData.cdnPath480 = \'([^\']+)\'') - itemlist.append(item.clone(action="play", title=url, contentTitle = url, url=url)) - elif "xvideos1" in url1: - item1 = item.clone(url=url1) - itemlist = xvideos.play(item1) - return itemlist - elif "pornhub" in url1 : - url = url1 - elif "txx" in url1:# Falta conector - url = "" - elif "youporn" in url1: - item1 = item.clone(url=url1) - itemlist = youporn.play(item1) - return itemlist - else: - data = httptools.downloadpage(url1).data - url = scrapertools.find_single_match(data,'"quality":"\d+","videoUrl":"([^"]+)"') - url = url.replace("\/", "/") - - itemlist.append(item.clone(action="play", title= "%s " + url1, contentTitle = item.title, url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/redtube.json b/channels/porn/redtube.json deleted file mode 100644 index 4d1f69f1..00000000 --- a/channels/porn/redtube.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "redtube", - "name": "redtube", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://thumbs-cdn.redtube.com/www-static/cdn_files/redtube/images/pc/logo/redtube_logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} diff --git a/channels/porn/redtube.py b/channels/porn/redtube.py deleted file mode 100644 index 91c511a0..00000000 --- a/channels/porn/redtube.py +++ /dev/null @@ -1,119 +0,0 @@ -# 
-*- coding: utf-8 -*- -#------------------------------------------------------------ - -import re -import urlparse -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger - -host = 'https://es.redtube.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/mostviewed")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top")) - itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstar")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?search=%s" % texto - try: - return peliculas(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="pornstar_link js_mpop js-pop" href="([^"]+)".*?"([^"]+)"\s+title="([^"]+)".*?<div class="ps_info_count">\s+([^"]+)\s+Videos' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] " - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page_url = 
scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="category_item_wrapper">.*?' - patron += '<a href="([^"]+)".*?' - patron += 'data-src="([^"]+)".*?' - patron += 'alt="([^"]+)".*?' - patron += '<span class="category_count">([^"]+) Videos' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - cantidad = cantidad.strip() - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<img id="img_.*?data-path="([^"]+)".*?' - patron += '<span class="duration">(.*?)</a>.*?' 
- patron += '<a title="([^"]+)" href="([^"]+)">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,duration,scrapedtitle,scrapedurl in matches: - url = urlparse.urljoin(item.url,scrapedurl) - scrapedhd = scrapertools.find_single_match(duration, '<span class="hd-video-text">(.*?)</span>') - if scrapedhd == 'HD': - duration = scrapertools.find_single_match(duration, 'HD</span>(.*?)</span>') - title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle - else: - duration = duration.replace("<span class=\"vr-video\">VR</span>", "") - title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - title = title.replace(" </span>", "").replace(" ", "") - scrapedthumbnail = scrapedthumbnail.replace("{index}.", "1.") - plot = "" - if not "/premium/" in url: - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = title) ) - next_page_url = scrapertools.find_single_match(data,'<a id="wp_navNext" class="js_pop_page" href="([^"]+)">') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '"defaultQuality":true,"format":"",.*?"videoUrl"\:"([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl in matches: - url = scrapedurl.replace("\/", "/") - itemlist.append(item.clone(action="play", title=url, url=url)) - return itemlist - diff --git a/channels/porn/serviporno.json b/channels/porn/serviporno.json deleted file mode 100644 index 5b72fb31..00000000 --- a/channels/porn/serviporno.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "serviporno", - "name": "Serviporno", - "active": true, - "adult": true, 
- "language": ["*"], - "thumbnail": "serviporno.png", - "banner": "serviporno.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/serviporno.py b/channels/porn/serviporno.py deleted file mode 100644 index 67096e09..00000000 --- a/channels/porn/serviporno.py +++ /dev/null @@ -1,149 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger - -host = "https://www.serviporno.com" - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", - url=host + "/ajax/homepage/?page=1", last= host)) - itemlist.append(Item(channel=item.channel, action="videos", title="Más vistos", - url=host + "/ajax/most_viewed/?page=1", last= host + "/mas-vistos/")) - itemlist.append(Item(channel=item.channel, action="videos", title="Más votados", - url=host + "/ajax/best_rated/?page=1", last= host + "/mas-votados/")) - itemlist.append(Item(channel=item.channel, action="categorias", title="Canal", - url=host + "/ajax/list_producers/?page=1", last= host + "/sitios/")) - itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url= host + "/categorias/")) - itemlist.append(Item(channel=item.channel, action="chicas", title="Chicas", - url=host + "/ajax/list_pornstars/?page=1", last= host + "/pornstars/")) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", last="")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + '/ajax/new_search/?q=%s&page=1' % texto - try: - return videos(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def get_last_page(url): - logger.info() - data = 
httptools.downloadpage(url).data - last_page= scrapertools.find_single_match(data,'data-ajax-last-page="(\d+)"') - if last_page: - last_page= int(last_page) - return last_page - - -def videos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '(?s)<div class="wrap-box-escena">.*?' - patron += '<div class="box-escena">.*?' - patron += '<a\s*href="([^"]+)".*?' - patron += 'data-stats-video-name="([^"]+)".*?' - patron += 'data-src="([^"]+)".*?' - patron += '<div class="duracion">([^"]+) min</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for url, title, thumbnail,duration in matches: - title = "[COLOR yellow]" + duration + "[/COLOR] " + title - url = urlparse.urljoin(item.url, url) - itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail, fanart=thumbnail)) - # Paginador "Página Siguiente >>" - current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)")) - if not item.last_page: - last_page = get_last_page(item.last) - else: - last_page = int(item.last_page) - if current_page < last_page: - next_page = "?page=" + str(current_page + 1) - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(Item(channel=item.channel, action="videos", title="Página Siguiente >>", text_color="blue", - url=next_page, thumbnail="", last_page=last_page)) - return itemlist - - -def chicas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="box-chica">.*?' - patron += '<a href="([^"]+)" title="">.*?' - patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+" />.*?' - patron += '<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?' 
- patron += '<a class="total-videos" href="[^"]+" title="">([^<]+)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, thumbnail, title, videos in matches: - last = urlparse.urljoin(item.url, url) - url= last.replace("/pornstar", "/ajax/show_pornstar") + "?page=1" - title = title + " (" + videos + ")" - itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, fanart=thumbnail)) - # Paginador "Página Siguiente >>" - current_page = int(scrapertools.find_single_match(item.url, "/?page=(\d+)")) - if not item.last_page: - last_page = get_last_page(item.last) - else: - last_page = int(item.last_page) - if current_page < last_page: - next_page = "?page=" + str(current_page + 1) - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(Item(channel=item.channel, action="chicas", title="Página Siguiente >>", text_color="blue", - url=next_page, thumbnail="", last_page=last_page)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="wrap-box-escena.*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<h4.*?<a href="([^"]+)">([^<]+)<' - matches = re.compile(patron, re.DOTALL).findall(data) - for thumbnail, url, title in matches: - last = urlparse.urljoin(item.url, url) - url= last.replace("/videos-porno", "/ajax/show_category").replace("/sitio","/ajax/show_producer") + "?page=1" - itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, last=last, thumbnail=thumbnail, plot="")) - # Paginador "Página Siguiente >>" - current_page = scrapertools.find_single_match(item.url, "/?page=(\d+)") - if current_page: - current_page = int(current_page) - if not item.last_page: - last_page = get_last_page(item.last) - else: - last_page = int(item.last_page) - if current_page < last_page: - next_page = "?page=" + str(current_page + 1) - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(Item(channel=item.channel, action="categorias", title="Página Siguiente >>", text_color="blue", - url=next_page, thumbnail="", last_page=last_page)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, "sendCdnInfo.'([^']+)") - itemlist.append( - Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail, - plot=item.plot, folder=False)) - return itemlist - diff --git a/channels/porn/sexgalaxy.json b/channels/porn/sexgalaxy.json deleted file mode 100644 index 36fadfbd..00000000 --- a/channels/porn/sexgalaxy.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "sexgalaxy", - "name": "sexgalaxy", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://sexgalaxy.net/wp-content/themes/redwaves-lite/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/sexgalaxy.py b/channels/porn/sexgalaxy.py deleted file mode 100644 index 506bd45a..00000000 --- a/channels/porn/sexgalaxy.py +++ /dev/null @@ -1,89 +0,0 
@@ -# -*- coding: utf-8 -*- - -import re -import urlparse -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger - -host = 'http://sexgalaxy.net' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/full-movies/")) - itemlist.append(Item(channel=item.channel, title="Videos", action="lista", url=host + "/new-releases/")) - itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host)) - itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host)) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def canales(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(host).data - data = scrapertools.find_single_match(data, '>TopSites</a>(.*?)</ul>') - patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = str(scrapedtitle) - thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) - itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, '>Popular Categories<(.*?)</p>') - patron = '<a href="(.*?)">(.*?)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - scrapedplot = 
"" - scrapedthumbnail = "" - scrapedtitle = str(scrapedtitle) - thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) - itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: - scrapedplot = "" - calidad = scrapertools.find_single_match(scrapedtitle, '\(.*?/(\w+)\)') - if calidad: - scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle - itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot)) - next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"') - if next_page != "": - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - diff --git a/channels/porn/sexkino.json b/channels/porn/sexkino.json deleted file mode 100644 index 12ffb7a7..00000000 --- a/channels/porn/sexkino.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "sexkino", - "name": "sexkino", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://sexkino.to/wp-content/uploads/2016/12/sexkino.to_.jpg", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/sexkino.py b/channels/porn/sexkino.py deleted file mode 100644 index c530c2b2..00000000 --- a/channels/porn/sexkino.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item 
-from platformcode import logger - -host = 'http://sexkino.to' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="New" , action="lista", url= host + "/movies/")) - itemlist.append( Item(channel=item.channel, title="Año" , action="anual", url= host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host)) - - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info("pelisalacarta.gmobi mainlist") - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="cat-item cat-item-.*?<a href="(.*?)" >(.*?)</a> <i>(.*?)</i>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle + " ("+cantidad+")" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - -def anual(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li><a href="([^<]+)">([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="poster">.*?' 
- patron += '<img src="([^"]+)" alt="([^"]+)">.*?' - patron += '<span class="quality">([^"]+)</span>.*?' - patron += '<a href="([^"]+)">' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedthumbnail,scrapedtitle,calidad,scrapedurl in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + calidad + ")" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">') - if next_page != "": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) ) - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - - # <th>Watch online</th><th>Quality</th><th>Language</th><th>Added</th></tr></thead> - # <tbody> - # <tr id='link-3848'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidzella.me'> <a href='http://sexkino.to/links/69321-5/' target='_blank'>Watch online</a></td> - # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr> - # <tr id='link-3847'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=flashx.tv'> <a href='http://sexkino.to/links/69321-4/' target='_blank'>Watch online</a></td> - # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr> - # <tr id='link-3844'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=openload.co'> <a href='http://sexkino.to/links/69321-3/' target='_blank'>Watch online</a></td> - # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr> - # <tr id='link-3843'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidoza.net'> <a 
href='http://sexkino.to/links/69321-2/' target='_blank'>Watch online</a></td> - # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr> - # <tr id='link-3842'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=rapidvideo.ws'> <a href='http://sexkino.to/links/69321/' target='_blank'>Watch online</a></td> - # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr> - # </tbody></table></div></div></div></div> - - - - patron = '<tr id=(.*?)</tr>' - matches = re.compile(patron,re.DOTALL).findall(data) - for match in matches: - url = scrapertools.find_single_match(match,'href="([^"]+)" target') - title = scrapertools.find_single_match(match,'<td><img src=.*?> (.*?)</td>') - itemlist.append(item.clone(action="play", title=title, url=url)) - - # <a id="link" href="https://vidzella.me/play#GS7D" class="btn" style="background-color:#1e73be">Continue</a> - - patron = '<iframe class="metaframe rptss" src="([^"]+)".*?<li><a class="options" href="#option-\d+">\s+(.*?)\s+<' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - url = scrapedurl - title = scrapedtitle - itemlist.append(item.clone(action="play", title=title, url=url)) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/sexofilm.json b/channels/porn/sexofilm.json deleted file mode 100644 index fc013872..00000000 --- a/channels/porn/sexofilm.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "sexofilm", - "name": "sexofilm", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://i0.wp.com/sexofilm.com/xbox/wp-content/uploads/2016/06/SexoFilm-Logo-230x54-LOGO-MOBILE.png", - "banner": "", - 
"categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/sexofilm.py b/channels/porn/sexofilm.py deleted file mode 100644 index d71a7d3e..00000000 --- a/channels/porn/sexofilm.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://sexofilm.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/xtreme-adult-wing/adult-dvds/")) - itemlist.append( Item(channel=item.channel, title="Parody" , action="lista", url=host + "/xtreme-adult-wing/porn-parodies/")) - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/xtreme-adult-wing/porn-clips-movie-scene/")) - itemlist.append( Item(channel=item.channel, title="SexMUSIC" , action="lista", url=host + "/topics/sexo-music-videos/")) - itemlist.append( Item(channel=item.channel, title="Xshows" , action="lista", url=host + "/xshows/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - # itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url =host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if item.title == "Canal" : - data = scrapertools.find_single_match(data,'>Best Porn Studios</a>(.*?)</ul>') - else: - 
data = scrapertools.find_single_match(data,'<div class="nav-wrap">(.*?)<ul class="sub-menu">') - itemlist.append( Item(channel=item.channel, action="lista", title="Big tit", url="https://sexofilm.com/?s=big+tits")) - patron = '<a href="([^<]+)">([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl) ) - return itemlist - - -def anual(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li><a href="([^<]+)">([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<article id="post-\d+".*?' - patron += '<a href="([^"]+)".*?' - patron += 'data-src="([^"]+)".*?' 
- patron += '<h2 class="post-title.*?title="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - plot = "" - title = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "") - itemlist.append(item.clone(action="play", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail, plot=plot) ) - next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data,'<div class="entry-inner">.*?<source src="([^"]+)"') - if not url: - url = scrapertools.find_single_match(data,'<div class="entry-inner">.*?<source src=\'([^\']+)\'') - itemlist = servertools.find_video_items(item.clone(url = item.url)) - if url: - itemlist.append(item.clone(action="play", title=url, url=url)) - return itemlist - - diff --git a/channels/porn/shameless.json b/channels/porn/shameless.json deleted file mode 100755 index f8016a10..00000000 --- a/channels/porn/shameless.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "shameless", - "name": "Shameless", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.shameless.com/favicon/apple-touch-icon.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/shameless.py b/channels/porn/shameless.py deleted file mode 100755 index 2d7ca4ab..00000000 --- a/channels/porn/shameless.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, 
logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.shameless.com' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/videos/1/")) - itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/videos/popular/week/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/videos/rated/week/")) - - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<a href="(https://www.shameless.com/categories/[^"]+)".*?' - patron += '<span itemprop="name">(.*?)</span> <sup>(.*?)</sup>.*?' - patron += 'src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad,scrapedthumbnail in matches: - scrapedplot = "" - title = scrapedtitle + " " + cantidad - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<div class="icnt.*?' - patron += '<a href="([^"]+)".*?' - patron += 'data-src="([^"]+)" alt="([^"]+)".*?' 
- patron += '<div class="bg"></div>([^<]+)</time>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail + "|Referer=https://www.shameless.com/" - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, 'class="active">.*?<a href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '(?:video_url|video_alt_url[0-9]*):\s*\'([^\']+)\'.*?' 
- patron += '(?:video_url_text|video_alt_url[0-9]*_text):\s*\'([^\']+)\'' - matches = scrapertools.find_multiple_matches(data, patron) - for url, quality in matches: - headers = {'Referer': item.url} - url = httptools.downloadpage(url, headers=headers , follow_redirects=False, only_headers=True).headers.get("location", "") - itemlist.append(["%s %s [directo]" % (quality, url), url]) - return itemlist - - diff --git a/channels/porn/siska.json b/channels/porn/siska.json deleted file mode 100644 index e4ebb2c4..00000000 --- a/channels/porn/siska.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "siska", - "name": "siska", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.siska.tv/images/siska.png?50", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/siska.py b/channels/porn/siska.py deleted file mode 100644 index 2ef0d1a2..00000000 --- a/channels/porn/siska.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.siska.tv/' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "newVideo.php?language=en")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "MostViewed.php?views=month&language=en")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "Channel.php?language=en")) - - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "index.php?category=1&language=en")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def 
search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "search.php?q=%s&language=en&search=Search" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - data = scrapertools.find_single_match(data,'<div id="content">(.*?)<div class="maincat">') - patron = '<a href="(.*?)".*?' - patron += '<img src="(.*?)".*?alt="(.*?)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle.replace("Watch Channel ", "") - url = urlparse.urljoin(item.url,scrapedurl) - thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url, - thumbnail=thumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - if "catID=" in item.url: - patron = '<li><h3><a href="([^"]+)">.*?' - patron += '<img src="([^"]+)" class="imgt" alt="([^"]+)".*?' - patron += '<div class="time">(.*?)</div>' - else: - patron = '<li><h3><a href=\'([^\']+)\'>.*?' - patron += '<img src=\'([^\']+)\' class=\'imgt\' alt=\'(.*?)\'.*?' 
- patron += '<div class=\'time\'>(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - scrapedtime = scrapedtime.replace("Duration: ", "").replace(" : ", ":") - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, - contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span>Next') - if next_page == "": - next_page = scrapertools.find_single_match(data, '<a href=\'([^\']+)\' title=\'Next Page\'>') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - diff --git a/channels/porn/sleazemovies.json b/channels/porn/sleazemovies.json deleted file mode 100644 index 1be75fce..00000000 --- a/channels/porn/sleazemovies.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "sleazemovies", - "name": "SleazeMovies", - "active": true, - "adult": true, - "language": [], - "thumbnail": "https://i.imgur.com/x0tzGxQ.jpg", - "banner": "https://i.imgur.com/d8LsUNf.png", - "fanart": "https://i.imgur.com/NRdQvFW.jpg", - "categories": [ - "movie", - "vos" - ] -} diff --git a/channels/porn/sleazemovies.py b/channels/porn/sleazemovies.py deleted file mode 100644 index 10b81e6f..00000000 --- a/channels/porn/sleazemovies.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel SleazeMovies -*- -# -*- Created for Alfa-addon -*- -# -*- By Sculkurt -*- - -import re -from channelselector import get_thumb -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from 
platformcode import config, logger - -host = 'http://www.eroti.ga/' - - -def mainlist(item): - logger.info() - - itemlist = list() - itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True))) - itemlist.append(item.clone(title="Generos", action="genero", url=host, thumbnail=get_thumb('genres', auto=True))) - itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True))) - - return itemlist - -def genero(item): - logger.info() - itemlist = list() - data = httptools.downloadpage(host).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '<li class="cat-item.*?<a href="([^"]+)".*?>([^<]+)</a>' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedtitle in matches: - - itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl)) - return itemlist - - -def list_all(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc... 
- - patron = '<div class="featured-thumb"><a href="([^"]+)"><img.*?src="([^?]+).*?data-image-title="([^\(]+).*?\(([^\)]+)' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, img, scrapedtitle, year in matches: - itemlist.append(Item(channel = item.channel, - title = scrapedtitle, - url = scrapedurl, - action = "findvideos", - thumbnail = img, - contentTitle = scrapedtitle, - contentType = "movie", - infoLabels = {'year': year})) - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) - - # Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">Next</a></div>') - if next_page != "": - itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True)) - return itemlist - - - -def search(item, texto): - logger.info() - if texto != "": - texto = texto.replace(" ", "+") - item.url = host + "?s=" + texto - item.extra = "busqueda" - try: - return list_all(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def findvideos(item): - logger.info() - - itemlist = [] - - data = httptools.downloadpage(item.url).data - logger.debug('codigo = ' + data) - - itemlist.extend(servertools.find_video_items(data=data)) - - for video in itemlist: - - video.channel = item.channel - video.contentTitle = item.contentTitle - - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append(Item(channel = item.channel, - title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url = item.url, - action = "add_pelicula_to_library", - extra = "findvideos", - contentTitle = item.contentTitle, - thumbnail = item.thumbnail - )) - - return itemlist diff --git a/channels/porn/spankbang.json b/channels/porn/spankbang.json deleted file mode 100644 index 40cdeee4..00000000 --- a/channels/porn/spankbang.json +++ /dev/null @@ -1,16 +0,0 @@ -{ 
- "id": "spankbang", - "name": "spankbang", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://static.spankbang.com/static_desktop/Images/logo_desktop@2xv2.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - - diff --git a/channels/porn/spankbang.py b/channels/porn/spankbang.py deleted file mode 100644 index 538eabe8..00000000 --- a/channels/porn/spankbang.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://es.spankbang.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos", action="lista", url= host + "/new_videos/")) - itemlist.append( Item(channel=item.channel, title="Mas valorados", action="lista", url=host + "/trending_videos/")) - itemlist.append( Item(channel=item.channel, title="Mas vistos", action="lista", url= host + "/most_popular/")) - itemlist.append( Item(channel=item.channel, title="Mas largos", action="lista", url= host + "/longest_videos/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/s/%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a href="([^"]+)/?order=trending"><img src="([^"]+)"><span>([^"]+)</span></a>' - matches = re.compile(patron,re.DOTALL).findall(data) - 
scrapertools.printMatches(matches) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl , - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="video-item" data-id="\d+">.*?' - patron += '<a href="([^"]+)" class="thumb ">.*?' - patron += 'data-src="([^"]+)" alt="([^"]+)".*?' - patron += '</span>(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="i-hd">(.*?)</span>') - duration = scrapertools.find_single_match(scrapedtime, '<i class="fa fa-clock-o"></i>(.*?)</span>') - if scrapedhd != '': - title = "[COLOR yellow]" + duration + " min[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle - else: - title = "[COLOR yellow]" + duration + " min[/COLOR] " + scrapedtitle - thumbnail = "http:" + scrapedthumbnail - plot = "" - year = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=title) ) - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)">') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>" , text_color="blue", - url=next_page ) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data, 'var stream_url_1080p = 
\'([^\']+)\';') - if scrapedurl == "": - scrapedurl = scrapertools.find_single_match(data, 'var stream_url_720p = \'([^\']+)\';') - if scrapedurl == "": - scrapedurl = scrapertools.find_single_match(data, 'var stream_url_480p = \'([^\']+)\';') - scrapedurl = scrapedurl.replace("amp;", "") - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, thumbnail=item.thumbnail, - plot=item.plot, show=item.title, server="directo")) - return itemlist - diff --git a/channels/porn/spankwire.json b/channels/porn/spankwire.json deleted file mode 100644 index bb80d7a9..00000000 --- a/channels/porn/spankwire.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "spankwire", - "name": "spankwire", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://cdn1-static-spankwire.spankcdn.net/apple-touch-icon-precomposed.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/spankwire.py b/channels/porn/spankwire.py deleted file mode 100644 index bd0856c8..00000000 --- a/channels/porn/spankwire.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re - -from core import jsontools as json -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.spankwire.com' - -url_api = host + "/api/video/list.json?segment=Straight&limit=33&sortby=" - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=url_api + "recent&page=1")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=url_api + "views&period=Month&page=1")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=url_api + "rating&period=Month&page=1")) - 
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=url_api + "duration&period=Month&page=1")) - itemlist.append( Item(channel=item.channel, title="Pornstar" , action="catalogo", url=host + "/api/pornstars?limit=48&sort=popular&page=1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/api/categories/list.json?segmentId=0&limit=100&sort=abc&page=1")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/api/video/search.json?segment=Straight&limit=33&query=%s&page=1" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - JSONData = json.load(data) - for Video in JSONData["items"]: - title = Video["name"] - id = Video["id"] - cantidad = Video["videosNumber"] - thumbnail = Video["image"] - title = "%s (%s)" % (title,cantidad) - thumbnail = thumbnail.replace("\/", "/").replace(".webp", ".jpg") - url = url_api + "recent&category=%s&page=1" % id - plot = "" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=url, - fanart=thumbnail, thumbnail=thumbnail, plot=plot) ) - Actual = int(scrapertools.find_single_match(item.url, '&page=([0-9]+)')) - if JSONData["pages"] - 1 > Actual: - scrapedurl = item.url.replace("&page=" + str(Actual), "&page=" + str(Actual + 1)) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=scrapedurl)) - return itemlist - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - JSONData = json.load(data) - for Video in 
JSONData["items"]: - title = Video["name"] - id = Video["id"] - cantidad = Video["videos"] - thumbnail = Video["thumb"] - title = "%s (%s)" % (title,cantidad) - thumbnail = thumbnail.replace("\/", "/").replace(".webp", ".jpg") - url = host + "/api/video/list.json?pornstarId=%s&limit=25&sortby=recent&page=1" % id - plot = "" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=url, - fanart=thumbnail, thumbnail=thumbnail, plot=plot) ) - Actual = int(scrapertools.find_single_match(item.url, '&page=([0-9]+)')) - if JSONData["pages"] - 1 > Actual: - scrapedurl = item.url.replace("&page=" + str(Actual), "&page=" + str(Actual + 1)) - itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=scrapedurl)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - JSONData = json.load(data) - for Video in JSONData["items"]: - segundos = Video["duration"] - horas=int(segundos/3600) - segundos-=horas*3600 - minutos=int(segundos/60) - segundos-=minutos*60 - if segundos < 10: - segundos = "0%s" %segundos - if minutos < 10: - minutos = "0%s" %minutos - if horas == 00: - duration = "%s:%s" % (minutos,segundos) - else: - duration = "%s:%s:%s" % (horas,minutos,segundos) - title = Video["title"] - thumbnail = Video["flipBookPath"] - url = host + Video["url"] - title = "[COLOR yellow]" + duration + "[/COLOR] " + title - thumbnail = thumbnail.replace("\/", "/").replace("{index}", "2") - url = url.replace("\/", "/") - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot)) - Actual = int(scrapertools.find_single_match(item.url, '&page=([0-9]+)')) - if JSONData["pages"] - 1 > Actual: - scrapedurl = item.url.replace("&page=" + str(Actual), "&page=" + str(Actual + 1)) - itemlist.append(item.clone(action="lista", title="Página 
Siguiente >>", text_color="blue", url=scrapedurl)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data,'<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"') - itemlist.append(item.clone(action="play", server = "directo", url=scrapedurl)) - return itemlist - diff --git a/channels/porn/streamingporn.json b/channels/porn/streamingporn.json deleted file mode 100644 index 0f7929a5..00000000 --- a/channels/porn/streamingporn.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "streamingporn", - "name": "streamingporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://streamingporn.xyz/wp-content/uploads/2017/06/streamingporn.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - - diff --git a/channels/porn/streamingporn.py b/channels/porn/streamingporn.py deleted file mode 100644 index 383bedae..00000000 --- a/channels/porn/streamingporn.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://streamingporn.xyz' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/category/movies/")) - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/stream/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, 
texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'PaySites(.*?)<li id="menu-item-28040"') - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl , - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<a href="#">Categories</a>(.*?)</ul>') - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li id="menu-item-\d+".*?<a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl , - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="entry-featuredImg">.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - url = scrapedurl - title = 
scrapedtitle - if 'HD' in scrapedtitle : - calidad = scrapertools.find_single_match(scrapedtitle, '(\d+)p') - title = "[COLOR red]" + "HD" +"[/COLOR] "+ scrapedtitle - if calidad : - title = "[COLOR red]" + "HD" + calidad +" [/COLOR] "+ scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, - fanart=scrapedthumbnail, plot=plot, contentTitle = contentTitle) ) - next_page_url = scrapertools.find_single_match(data,'<div class="loadMoreInfinite"><a href="(.*?)" >Load More') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="lista" , title="Página Siguiente >>" , - text_color="blue", url=next_page_url) ) - return itemlist - diff --git a/channels/porn/streamporno.json b/channels/porn/streamporno.json deleted file mode 100644 index 437adbbe..00000000 --- a/channels/porn/streamporno.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "streamporno", - "name": "streamporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://pornstreams.eu/wp-content/uploads/2015/12/faviiconeporn.png", - "banner": "", - "categories": [ - "adult" - - ], - "settings": [ - - ] -} - diff --git a/channels/porn/streamporno.py b/channels/porn/streamporno.py deleted file mode 100644 index 0634b825..00000000 --- a/channels/porn/streamporno.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://streamporno.eu' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host)) - itemlist.append( 
Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li id="menu-item-.*?<a href="([^"]+)">([^"]+)</a>' - if item.title == "Categorias": - itemlist.append( Item(channel=item.channel, title="Big Tits" , action="lista", url=host + "/?s=big+tits")) - patron = '<li class="cat-item.*?<a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<article id=.*?<a href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, - thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = 
contentTitle)) - next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - diff --git a/channels/porn/submityouflicks.json b/channels/porn/submityouflicks.json deleted file mode 100644 index 42bdf281..00000000 --- a/channels/porn/submityouflicks.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "submityouflicks", - "name": "Submit Your Flicks", - "active": true, - "adult": true, - "language": ["*"], - "banner": "submityouflicks.png", - "thumbnail": "submityouflicks.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} diff --git a/channels/porn/submityouflicks.py b/channels/porn/submityouflicks.py deleted file mode 100644 index 4d726fb8..00000000 --- a/channels/porn/submityouflicks.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger - -host = 'http://www.submityourflicks.com' - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url= host)) - itemlist.append(Item(channel=item.channel, action="videos", title="Mas vistos", url= host + "/most-viewed/")) - itemlist.append(Item(channel=item.channel, action="videos", title="Mejor valorados", url= host + "/top-rated/")) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url= host)) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "-") - item.url = host + "/search/%s/" % texto - try: - return videos(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def videos(item): - logger.info() - itemlist = [] - data = 
httptools.downloadpage(item.url).data - patron = '<div class="item-block item-normal col".*?' - patron += '<a href="([^"]+)" title="([^"]+)">.*?' - patron += 'data-src="([^"]+)".*?' - patron += '</span> ([^"]+)<' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - url = scrapedurl - thumbnail = scrapedthumbnail.replace(" ", "%20") - itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail)) - next_page = scrapertools.find_single_match(data, "<a href='([^']+)' class=\"next\">NEXT</a>") - if next_page != "": - url = urlparse.urljoin(item.url, next_page) - itemlist.append(Item(channel=item.channel, action="videos", title=">> Página siguiente", url=url)) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - media_url = "https:" + scrapertools.find_single_match(data, 'source src="([^"]+)"') - itemlist = [] - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=media_url, - thumbnail=item.thumbnail, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/sunporno.json b/channels/porn/sunporno.json deleted file mode 100644 index dff98bb7..00000000 --- a/channels/porn/sunporno.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "sunporno", - "name": "sunporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://sunstatic.fuckandcdn.com/sun/sunstatic/v31/common/sunporno/img/logo_top.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/sunporno.py b/channels/porn/sunporno.py deleted file mode 100644 index e9a0259f..00000000 --- a/channels/porn/sunporno.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- 
-#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.sunporno.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host +"/most-recent/")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-viewed/date-last-week/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/date-last-week/")) - itemlist.append( Item(channel=item.channel, title="Mas largas" , action="lista", url=host + "/long-movies/date-last-month/")) - itemlist.append( Item(channel=item.channel, title="PornStars" , action="catalogo", url=host + "/pornstars/most-viewed/1/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="thumb-container with-title moviec.*?' - patron += '<a href="([^"]+)".*?' - patron += 'src="([^"]+)".*?' - patron += '<a title="([^"]+)".*?' 
- matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = scrapedurl + "/most-recent/" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="starec">.*?' - patron += '<a href="([^"]+)".*?' - patron += '<img class="thumb" src="([^"]+)" alt="([^"]+)".*?' - patron += '<p class="videos">(\d+)</p>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel , action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - data = scrapertools.find_single_match(data,'class="thumbs-container">(.*?)<div class="clearfix">') - patron = '<p class="btime">([^"]+)</p>.*?' - patron += '>(.*?)<img width=.*?' - patron += '="([^"]+)" class="thumb.*?' - patron += 'title="([^"]+)".*?' 
- patron += 'href="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for duracion,calidad,scrapedthumbnail,scrapedtitle,scrapedurl in matches: - url = scrapedurl - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - if ">HD<" in calidad: - title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=scrapedthumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="(.*?)">Next ></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel , action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<video src="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - scrapedurl = scrapedurl.replace("https:", "http:") - scrapedurl += "|Referer=%s" % host - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/sxyprn.json b/channels/porn/sxyprn.json deleted file mode 100755 index 55157b53..00000000 --- a/channels/porn/sxyprn.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "sxyprn", - "name": "sxyprn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.sxyprn.com/favicon.ico", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/sxyprn.py b/channels/porn/sxyprn.py deleted file mode 100755 index 8f68519b..00000000 --- a/channels/porn/sxyprn.py +++ /dev/null @@ 
-1,99 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,re -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.sxyprn.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/blog/all/0.html?fl=all&sm=latest")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/popular/top-viewed.html")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/popular/top-rated.html")) - itemlist.append( Item(channel=item.channel, title="Sitios" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "-") - item.url = host + "/%s.html" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - if "Sitios" in item.title: - patron = "<a href='([^']+)' target='_blank'><div class='top_sub_el top_sub_el_sc'>.*?" - patron += "<span class='top_sub_el_key_sc'>([^<]+)</span>" - patron += "<span class='top_sub_el_count'>(\d+)</span>" - else: - patron = "<a class='tdn' href='([^']+)'.*?" 
- patron += "<span class='htag_el_tag'>([^<]+)</span>" - patron += "<span class='htag_el_count'>(\d+) videos</span>" - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = "<img class=.*?" - patron += " src='([^']+)'.*?" - patron += "<span class='duration_small'.*?'>([^<]+)<.*?" - patron += "<span class='shd_small'.*?>([^<]+)<.*?" - patron += "post_time' href='([^']+)' title='([^']+)'" - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedtime,quality,scrapedurl,scrapedtitle in matches: - title = "[COLOR yellow]%s[/COLOR] [COLOR red]%s[/COLOR] %s" % (scrapedtime,quality,scrapedtitle) - thumbnail = "https:" + scrapedthumbnail - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, - thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = scrapedtitle)) - # - next_page = scrapertools.find_single_match(data, "<div class='ctrl_el ctrl_sel'>.*?<a href='([^']+)'") - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - url = scrapertools.find_single_match(data, 'data-vnfo=.*?":"([^"]+)"') - url = url.replace("\/", 
"/").replace("/cdn/", "/cdn7/") - url = urlparse.urljoin(item.url,url) - itemlist.append( Item(channel=item.channel, action="play", title = "%s " + url, url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/tabooshare.json b/channels/porn/tabooshare.json deleted file mode 100644 index 568b6953..00000000 --- a/channels/porn/tabooshare.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "tabooshare", - "name": "tabooshare", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://tabooshare.com/wp-content/uploads/2017/06/cropped-TSa-180x180.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/tabooshare.py b/channels/porn/tabooshare.py deleted file mode 100644 index e2bdd1c7..00000000 --- a/channels/porn/tabooshare.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import re - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = 'http://tabooshare.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data,'<h3>Categories</h3>(.*?)</ul>') - patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = str(scrapedtitle) - itemlist.append( Item(channel=item.channel, 
action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="post" id="post-\d+">.*?<a href="([^"]+)" title="(.*?)"><img src="(.*?)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle.replace(" – Free Porn Download", "") - itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<span class="current">.*?<a href="(.*?)"') - if next_page=="http://NaughtyPorn.net/": - next_page = scrapertools.find_single_match(data,'<span class="current">.*?<a href=\'(.*?)\'') - if next_page!="": - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - data = httptools.downloadpage(item.url).data - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/thumbzilla.json b/channels/porn/thumbzilla.json deleted file mode 100644 index fcce22f8..00000000 --- a/channels/porn/thumbzilla.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "id": "thumbzilla", - "name": "ThumbZilla", - "active": true, - "adult": true, - "language": "en", - "fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg", - "thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png?cache=2018110203", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - { - "id": 
"modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 3, - "enabled": true, - "visible": true, - "lvalues": [ - "Sin color", - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - } - ] -} diff --git a/channels/porn/thumbzilla.py b/channels/porn/thumbzilla.py deleted file mode 100644 index aab21aee..00000000 --- a/channels/porn/thumbzilla.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import channeltools -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from channelselector import get_thumb - -__channel__ = "thumbzilla" - -host = 'https://www.thumbzilla.com' -try: - __modo_grafico__ = config.get_setting('modo_grafico', __channel__) - __perfil__ = int(config.get_setting('perfil', __channel__)) -except: - __modo_grafico__ = True - __perfil__ = 0 - -# Fijar perfil de color -perfil = [['0xFF6E2802', '0xFFFAA171', '0xFFE9D7940'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']] - -if __perfil__ - 1 >= 0: - color1, color2, color3 = perfil[__perfil__ - 1] -else: - color1 = color2 = color3 = "" - -headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], - ['Referer', host]] - -parameters = channeltools.get_channel_parameters(__channel__) -fanart_host = parameters['fanart'] -thumbnail_host = parameters['thumbnail'] -thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/%s.png' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host, - viewmode="movie", thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, 
title="Nuevas", url=host + '/newest', - action="videos", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/tending', - action="videos", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top', - action="videos", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular', - action="videos", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd', - action="videos", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd', - action="videos", viewmode="movie_with_plot", viewcontent='homemade', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="PornStar", action="catalogo", - url=host + '/pornstars/', viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias", - url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies', - thumbnail=get_thumb("adult.png"))) - - itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, - thumbnail=get_thumb("adult.png"), extra="buscar")) - return itemlist - - -# REALMENTE PASA LA DIRECCION DE BUSQUEDA - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = urlparse.urljoin(item.url, "video/search?q={0}".format(texto)) - # item.url = item.url % tecleado - item.extra = "buscar" - try: - return videos(item) - # Se captura la excepción, para no 
interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - -def videos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '<a class="[^"]+" href="([^"]+)">' # url - patron += '<img id="[^"]+".*?src="([^"]+)".*?' # img - patron += '<span class="title">([^<]+)</span>.*?' # title - patron += '<span class="duration"(.*?)</a>' # time - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches: - time = scrapertools.find_single_match(scrapedtime, '>([^<]+)</span>') - title = "[%s] %s" % (time, scrapedtitle) - if ">HD<" in scrapedtime: - title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail, - url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail)) - paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '') - if paginacion: - itemlist.append(Item(channel=item.channel, action="videos", - thumbnail=thumbnail % 'rarrow', - title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) - return itemlist - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li class="pornstars">.*?<a href="([^"]+)".*?' 
- patron += '<img src="([^"]+)" alt="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - url = urlparse.urljoin(item.url, scrapedurl) - itemlist.append(Item(channel=item.channel, action="videos", url=url, title=scrapedtitle, fanart=scrapedthumbnail, - thumbnail=scrapedthumbnail, viewmode="movie_with_plot")) - paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '') - if paginacion: - itemlist.append(Item(channel=item.channel, action="catalogo", - thumbnail=thumbnail % 'rarrow', - title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - # logger.info(data) - patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url - patron += '<span class="count">([^<]+)</span>' # title, vids - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, vids in matches: - scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title() - title = "%s (%s)" % (scrapedtitle, vids.title()) - thumbnail = item.thumbnail - url = urlparse.urljoin(item.url, scrapedurl) - itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail, - title=title, url=url, thumbnail=thumbnail, - viewmode="movie_with_plot", folder=True)) - return itemlist - - -def play(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '') - itemlist.append(item.clone(url=url, title=item.contentTile)) - return itemlist - diff --git a/channels/porn/titsbox.json b/channels/porn/titsbox.json deleted file mode 100755 index 1262153a..00000000 --- a/channels/porn/titsbox.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "titsbox", - "name": "titsbox", - "active": true, - "adult": 
true, - "language": ["*"], - "thumbnail": "https://titsbox.com/android-chrome-192x192.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/titsbox.py b/channels/porn/titsbox.py deleted file mode 100755 index d34d1f47..00000000 --- a/channels/porn/titsbox.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re - -from core import jsontools as json -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://titsbox.com' #titbox vivud zmovs - -url_api = host + "/?ajax=1&type=" - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=url_api + "most-recent&page=1")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=url_api + "top-rated&page=1")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=url_api + "long&page=1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, '<ul class="sidebar-nav">(.*?)</ul>') - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<a class="category-item" href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - url = host + scrapedurl + "?ajax=1&type=most-recent&page=1" - scrapedplot = "" - thumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url, - thumbnail=thumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = 
httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - JSONData = json.load(data) - for Video in JSONData["data"]: - duration = Video["duration"] - title = Video["videoTitle"] - title = "[COLOR yellow]%s[/COLOR] %s" % (duration,title) - src= Video["src"] - domain="" - thumbnail = src.get('domain', domain) + src.get('pathMedium', domain)+"1.jpg" - url= Video["urls_CDN"] - url= url.get('480', domain) - url = url.replace("/\n/", "/") - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot,)) - Actual = int(scrapertools.find_single_match(item.url, '&page=([0-9]+)')) - if JSONData["pagesLeft"] - 1 > Actual: - scrapedurl = item.url.replace("&page=" + str(Actual), "&page=" + str(Actual + 1)) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=scrapedurl)) - return itemlist - - -def findvideos(item): - return - diff --git a/channels/porn/tnaflix.json b/channels/porn/tnaflix.json deleted file mode 100644 index 94f6956e..00000000 --- a/channels/porn/tnaflix.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "tnaflix", - "name": "tnaflix", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.tnaflix.com/images/favicons/tnaflix/android-icon-192x192.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - - diff --git a/channels/porn/tnaflix.py b/channels/porn/tnaflix.py deleted file mode 100644 index 0df37291..00000000 --- a/channels/porn/tnaflix.py +++ /dev/null @@ -1,138 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools -from core import tmdb -from core import jsontools - -host = 'https://www.tnaflix.com' 
- -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/new/?d=all&period=all")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/?d=all&period=all")) - itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/toprated/?d=all&period=month")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/all/top-rated/1/all")) - itemlist.append( Item(channel=item.channel, title="PornStars" , action="categorias", url=host + "/pornstars")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search.php?what=%s&tab=" % texto - try: - return peliculas(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="vidcountSp">(\d+)</div>.*?<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle + " (" + cantidad + ")" - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) - next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( 
Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if item.title=="PornStars" : - data = scrapertools.find_single_match(data,'</i> Hall Of Fame Pornstars</h1>(.*?)</section>') - patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: - scrapedplot = "" - if item.title=="Categorias" : - scrapedthumbnail = "http:" + scrapedthumbnail - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - if item.title=="PornStars" : - scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "?section=videos" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) - next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?' 
- patron += 'data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>(.*?)<div class=\'watchedInfo' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,duracion,quality in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - if quality: - quality= scrapertools.find_single_match(quality, '>(\d+p)<') - title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append(Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def ref(url): - logger.info() - itemlist = [] - data = httptools.downloadpage(url).data - VID = scrapertools.find_single_match(data,'id="VID" type="hidden" value="([^"]+)"') - vkey = scrapertools.find_single_match(data,'id="vkey" type="hidden" value="([^"]+)"') - thumb = scrapertools.find_single_match(data,'id="thumb" type="hidden" value="([^"]+)"') - nkey= scrapertools.find_single_match(data,'id="nkey" type="hidden" value="([^"]+)"') - url = "https://cdn-fck.tnaflix.com/tnaflix/%s.fid?key=%s&VID=%s&nomp4=1&catID=0&rollover=1&startThumb=%s" % (vkey, nkey, VID, thumb) - url += "&embed=0&utm_source=0&multiview=0&premium=1&country=0user=0&vip=1&cd=0&ref=0&alpha" - return url - - -def play(item): - logger.info() - itemlist = [] - url= ref(item.url) - headers = {'Referer': item.url} - data = httptools.downloadpage(url, headers=headers).data - patron = '<res>(.*?)</res>.*?' 
- patron += '<videoLink><([^<]+)></videoLink>' - matches = scrapertools.find_multiple_matches(data, patron) - for title, url in matches: - url= url.replace("![CDATA[", "http:").replace("]]", "") - itemlist.append(["%s %s [directo]" % (title, url), url]) - itemlist.reverse() - return itemlist - - diff --git a/channels/porn/tryboobs.json b/channels/porn/tryboobs.json deleted file mode 100644 index 9fe3080f..00000000 --- a/channels/porn/tryboobs.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "tryboobs", - "name": "tryboobs", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://tb3.fuckandcdn.com/tb/tbstatic/v30/common/tryboobs/img/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/tryboobs.py b/channels/porn/tryboobs.py deleted file mode 100644 index 333751d3..00000000 --- a/channels/porn/tryboobs.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://www.tryboobs.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/week/")) - itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="lista", url=host + "/top-rated/week/")) - itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/model-viewed/1/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def 
search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" class="th-[^"]+">.*?' - patron += 'src="([^"]+)".*?' - patron += '<span>(\d+)</span>.*?' - patron += '<span class="title">([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = 'href="([^"]+)"\s*class="th-video.*?' - patron += '<img src="([^"]+)".*?' - patron += '<span class="time">([^"]+)</span>.*?' 
- patron += '<span class="title">([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,duracion,scrapedtitle in matches: - url = scrapedurl - contentTitle = scrapedtitle - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<video src="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - url += "|Referer=%s" % host - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=url, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/tubedupe.json b/channels/porn/tubedupe.json deleted file mode 100644 index 37fa768b..00000000 --- a/channels/porn/tubedupe.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "tubedupe", - "name": "tubedupe", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://tubedupe.com/apple-touch-icon-180x180-precomposed.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - - diff --git a/channels/porn/tubedupe.py b/channels/porn/tubedupe.py deleted file mode 100644 index 0897c8bb..00000000 --- a/channels/porn/tubedupe.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import 
urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://tubedupe.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Modelos" , action="categorias", url=host + "/models/?sort_by=model_viewed")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/?sort_by=cs_viewed")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/?sort_by=avg_videos_popularity")) - # itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="block-[^"]+">.*?' - patron += '<a href="([^"]+)".*?title="([^"]+)".*?' - patron += 'src="([^"]+)".*?' 
- if '/models/' in item.url: - patron += '<span class="strong">Videos</span>(.*?)</div>' - else: - patron += '<var class="duree">([^"]+) </var>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - cantidad = cantidad.strip() - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail,fanart=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page ) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="block-video">.*?' - patron += '<a href="([^"]+)" class="[^"]+" title="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' 
- patron += '<var class="duree">(.*?)</var>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail,plot=plot, contentTitle = title)) - next_page = scrapertools.find_single_match(data, '<li class="active">.*?<a href="([^"]+)" title="Page') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page ) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'') - if scrapedurl == "" : - scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'') - - itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - - diff --git a/channels/porn/tubehentai.json b/channels/porn/tubehentai.json deleted file mode 100644 index 57321cbf..00000000 --- a/channels/porn/tubehentai.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "id": "tubehentai", - "name": "tubehentai", - "active": true, - "adult": true, - "language": ["*"], - "banner": "tubehentai.png", - "thumbnail": "tubehentai.png", - "categories": [ - "adult" - ], - "settings": [ - ] -} \ No newline at end of file diff --git a/channels/porn/tubehentai.py 
b/channels/porn/tubehentai.py deleted file mode 100644 index 6352c943..00000000 --- a/channels/porn/tubehentai.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger - -host = 'http://tubehentai.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Novedades", action="lista", url=host + "/most-recent/")) - itemlist.append(Item(channel=item.channel, title="Mas visto", action="lista", url=host + "/most-viewed/")) - itemlist.append(Item(channel=item.channel, title="Mejor valorado", action="lista", url=host + "/top-rated/")) - - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "%20") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<a href="((?:http|https)://tubehentai.com/video/[^"]+)" title="([^"]+)".*?' - patron += '<span class="icon -time">.*?<span class="item__stat-label">([^<]+)</span>.*?' 
- patron += '<img src="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,duration,scrapedthumbnail in matches: - title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle - itemlist.append(Item(channel=item.channel, action="play", title=title, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail)) - next_page = scrapertools.find_single_match(data,'<a rel=\'next\' title=\'Next\' href=\'([^\']+)\'') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data, '<source src="([^"]+\.mp4)"') - server = "Directo" - itemlist.append(Item(channel=item.channel, title="", url=url, server=server, folder=False)) - return itemlist - diff --git a/channels/porn/videosXYZ.json b/channels/porn/videosXYZ.json deleted file mode 100644 index b9726ca3..00000000 --- a/channels/porn/videosXYZ.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "videosXYZ", - "name": "videosXYZ", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://free-porn-videos.xyz/wp-content/uploads/2018/10/cropped-Logo-org-Free-porn-videos.xyz-app-icon-192x192.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/videosXYZ.py b/channels/porn/videosXYZ.py deleted file mode 100644 index 6f645dcf..00000000 --- a/channels/porn/videosXYZ.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 
'http://free-porn-videos.xyz' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/topics/adult-movie/")) - itemlist.append( Item(channel=item.channel, title="Parody" , action="lista", url=host + "/topics/free-porn-parodies/")) - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/topics/porn-videos/")) - itemlist.append( Item(channel=item.channel, title="BigTits" , action="lista", url=host + "/?s=big+tit")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<article id="post-\d+".*?<a href="([^"]+)".*?data-src="([^"]+)".*?alt="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "") - itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, contentTitle=scrapedtitle, plot=scrapedplot) ) - next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page_url) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - 
scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"') - scrapedurl = scrapedurl.replace("%28", "(").replace("%29", ")") - itemlist = servertools.find_video_items(data=data) - for videoitem in itemlist: - videoitem.title = item.title - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - return itemlist - diff --git a/channels/porn/vidz7.json b/channels/porn/vidz7.json deleted file mode 100644 index ab0941f5..00000000 --- a/channels/porn/vidz7.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "vidz7", - "name": "Vidz7", - "active": true, - "adult": true, - "language": ["*"], - "banner": "https://www.dropbox.com/s/182r0wby3ohnxkc/bannermenu.jpg?dl=1", - "thumbnail": "https://www.dropbox.com/s/7z31b4ixve2ge0l/thumbnail.png?dl=1", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/channels/porn/vidz7.py b/channels/porn/vidz7.py deleted file mode 100644 index de2cb712..00000000 --- a/channels/porn/vidz7.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger - -host = 'http://www.vidz7.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", url=host)) - itemlist.append( - Item(channel=item.channel, action="categorias", title="Canal", url=host + "/category/")) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url="http://www.vidz7.com")) - return itemlist - - -def search(item, texto): - logger.info() - - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - # Se captura la excepción, para 
no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}", "", data) - patron = '<li><a href="([^"]+)">([^<]+)</a><span>(\d+) </' - matches = re.compile(patron, re.DOTALL).findall(data) - for url, title, cantidad in matches: - title = title + " (" + cantidad + ")" - itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url)) - - return itemlist - - -def lista(item): - logger.info() - # Descarga la página - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}", "", data) - patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?" - patron += "<div class=\"hd\">(.*?)</div>.*?" - patron += "<div class=\"duration\">(.*?)</div>.*?" - patron += "<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>" - matches = re.compile(patron, re.DOTALL).findall(data) - itemlist = [] - for scrapedthumbnail, scrapedhd, duration, scrapedurl, scrapedtitle in matches: - thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) - url = urlparse.urljoin(item.url, scrapedurl) - scrapedtitle = scrapedtitle.strip() - title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle - # Añade al listado - itemlist.append(Item(channel=item.channel, action="play", title=title, thumbnail=thumbnail, fanart=thumbnail, - contentTitle=title, url=url, - viewmode="movie", folder=True)) - paginacion = scrapertools.find_single_match(data,'<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">') - if paginacion: - itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", url=paginacion)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - # Descarga la página - data = httptools.downloadpage(item.url).data - data 
= scrapertools.unescape(data) - itemlist.extend(servertools.find_video_items(data=data)) - for videoitem in itemlist: - videoitem.thumbnail = item.thumbnail - videoitem.channel = item.channel - videoitem.action = "play" - videoitem.folder = False - videoitem.title = item.title - return itemlist - diff --git a/channels/porn/vintagetube.json b/channels/porn/vintagetube.json deleted file mode 100644 index ddf36990..00000000 --- a/channels/porn/vintagetube.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "vintagetube", - "name": "vintagetube", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/vintagetube.py b/channels/porn/vintagetube.py deleted file mode 100644 index fafe7f37..00000000 --- a/channels/porn/vintagetube.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://www.vintagetube.club' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/tube/last-1/")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/tube/popular-1/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s" % texto - item.url = item.url + "/popular-1/" - try: - return peliculas(item) - except: - import sys - for line in sys.exc_info(): 
- logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="prev prev-ct">.*?' - patron += '<a href="([^"]+)">.*?' - patron += '<img src="([^"]+)".*?' - patron += '<span class="prev-tit">([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - scrapedurl = host + scrapedurl - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="prev">.*?' - patron += '<a href="([^"]+)">.*?' - patron += '<img src="([^"]+)">.*?' - patron += '<span class="prev-tit">([^"]+)</span>.*?' - patron += '<div class="prev-dur"><span>([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches: - scrapedplot = "" - scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + str(scrapedtitle) - scrapedurl = scrapedurl.replace("/xxx.php?tube=", "") - scrapedurl = host + scrapedurl - itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<span class="page">.*?<a target="_self" href="([^"]+)"') - - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page) ) - - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = 
scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'(.*?)\'') - if scrapedurl == "": - scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"') - data = httptools.downloadpage(scrapedurl).data - else: - data = httptools.downloadpage(scrapedurl).data - scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"') - data = httptools.downloadpage("https:" + scrapedurl).data - scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"') - itemlist = [] - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/vintagexxxsex.json b/channels/porn/vintagexxxsex.json deleted file mode 100644 index 6c51cae5..00000000 --- a/channels/porn/vintagexxxsex.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "vintagexxxsex", - "name": "vintagexxxsex", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/vintagexxxsex.py b/channels/porn/vintagexxxsex.py deleted file mode 100644 index b7afa813..00000000 --- a/channels/porn/vintagexxxsex.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://www.vintagexxxsex.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/all-top/1/")) - itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/all-new/1/")) - 
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/all-longest/1/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li><a href="([^"]+)"><i class="fa fa-tag"></i>(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = host + scrapedurl - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="th">.*?' - patron += '<a href="([^"]+)".*?' - patron += '<img src="([^"]+)".*?' - patron += '<span class="th_nm">([^"]+)</span>.*?' 
- patron += '<i class="fa fa-clock-o"></i>([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches: - contentTitle = scrapedtitle - title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle - scrapedurl = scrapedurl.replace("/up.php?xxx=", "") - scrapedurl = host + scrapedurl - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=contentTitle)) - next_page = scrapertools.find_single_match(data,'<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"') - data = httptools.downloadpage(scrapedurl).data - scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"') - if scrapedurl == "": - scrapedurl = "http:" + scrapertools.find_single_match(data,'<iframe src="([^"]+)"') - data = httptools.downloadpage(scrapedurl).data - scrapedurl = scrapertools.find_single_match(data,'file: "([^"]+)"') - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/vivud.json b/channels/porn/vivud.json deleted file mode 100755 index 7b70e238..00000000 --- a/channels/porn/vivud.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "vivud", - "name": "vivud", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://vivud.com/favicon-96x96.png", - "banner": "", - "categories": [ - "adult" - ], - 
"settings": [ - ] -} - diff --git a/channels/porn/vivud.py b/channels/porn/vivud.py deleted file mode 100755 index 02d6b015..00000000 --- a/channels/porn/vivud.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re - -from core import jsontools as json -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://vivud.com' #titbox vivud zmovs - -url_api = host + "/?ajax=1&type=" - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=url_api + "most-recent&page=1")) - itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=url_api + "top-rated&page=1")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=url_api + "long&page=1")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, '<ul class="sidebar-nav">(.*?)</ul>') - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<a class="category-item" href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - url = host + scrapedurl + "?ajax=1&type=most-recent&page=1" - scrapedplot = "" - thumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url, - thumbnail=thumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - JSONData = json.load(data) - for Video in JSONData["data"]: - 
duration = Video["duration"] - title = Video["videoTitle"] - title = "[COLOR yellow]%s[/COLOR] %s" % (duration,title) - src= Video["src"] - domain="" - thumbnail = src.get('domain', domain) + src.get('pathMedium', domain)+"1.jpg" - url= Video["urls_CDN"] - url= url.get('480', domain) - url = url.replace("/\n/", "/") - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot,)) - Actual = int(scrapertools.find_single_match(item.url, '&page=([0-9]+)')) - if JSONData["pagesLeft"] - 1 > Actual: - scrapedurl = item.url.replace("&page=" + str(Actual), "&page=" + str(Actual + 1)) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=scrapedurl)) - return itemlist - - -def findvideos(item): - return - diff --git a/channels/porn/vporn.json b/channels/porn/vporn.json deleted file mode 100644 index d33ff5f9..00000000 --- a/channels/porn/vporn.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "vporn", - "name": "vporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://th-eu1.vporn.com/images/logo-dark-theme.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/vporn.py b/channels/porn/vporn.py deleted file mode 100644 index 65727102..00000000 --- a/channels/porn/vporn.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import jsontools as json -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'https://www.vporn.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/newest/month/")) - itemlist.append( 
Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/views/month/")) - itemlist.append( Item(channel=item.channel, title="Mejor Valoradas" , action="lista", url=host + "/rating/month/")) - itemlist.append( Item(channel=item.channel, title="Favoritas" , action="lista", url=host + "/favorites/month/")) - itemlist.append( Item(channel=item.channel, title="Mas Votada" , action="lista", url=host + "/votes/month/")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest/month/")) - itemlist.append( Item(channel=item.channel, title="PornStar" , action="catalogo", url=host + "/pornstars/")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search?q=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class=\'star\'>.*?' - patron += '<a href="([^"]+)".*?' - patron += '<img src="([^"]+)" alt="([^"]+)".*?' 
- patron += '<span> (\d+) Videos' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedurl = host + scrapedurl - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="catalogo", title="Next page >>", text_color="blue", url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '"name":"([^"]+)".*?' - patron += '"image":"([^"]+)".*?' - patron += '"url":"([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedtitle,scrapedthumbnail,scrapedurl in matches: - scrapedplot = "" - scrapedthumbnail = "https://th-us2.vporn.com" + scrapedthumbnail - scrapedthumbnail= scrapedthumbnail.replace("\/", "/") - scrapedurl = host + scrapedurl - scrapedurl = scrapedurl.replace("\/", "/") - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="video">.*?' - patron += '<a href="([^"]+)".*?' - patron += '<span class="time">(.*?)</span>(.*?)</span>.*?' 
- patron += '<img src="([^"]+)" alt="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,time,calidad,scrapedthumbnail,scrapedtitle in matches: - scrapedtitle = scrapedtitle.replace(", ", " & ").replace("(", "(").replace(")", ")") - title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle - if "hd-marker is-hd" in calidad: - title = "[COLOR yellow]" + time + " [/COLOR]" + "[COLOR red]" + "HD" + " [/COLOR]" + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=plot, contentTitle = title)) - next_page = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl,scrapedtitle in matches: - itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl)) - return itemlist - diff --git a/channels/porn/watchpornfree.json b/channels/porn/watchpornfree.json deleted file mode 100644 index f5f9b0a9..00000000 --- a/channels/porn/watchpornfree.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "watchpornfree", - "name": "watchpornfree", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://watchpornfree.info/wp-content/uploads/2019/01/favicon.ico", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/watchpornfree.py b/channels/porn/watchpornfree.py deleted file mode 100644 index 73d14f7f..00000000 --- a/channels/porn/watchpornfree.py +++ /dev/null @@ -1,77 
+0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -# https://playpornfree.org/ https://mangoporn.net/ https://watchfreexxx.net/ https://losporn.org/ https://xxxstreams.me/ https://speedporn.net/ - -host = 'https://watchpornfree.info' - -def mainlist(item): - logger.info("") - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips-scenes")) - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Parodia" , action="lista", url=host + "/category/parodies")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info("") - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info("") - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if item.title == "Canal": - data = scrapertools.find_single_match(data,'Scenes</a></li>(.*?)</ul>') - if item.title == "Año": - data = scrapertools.find_single_match(data,'Year</a>(.*?)</ul>') - if item.title == "Categorias": - data = scrapertools.find_single_match(data,'>Categories</div>(.*?)</ul>') - patron = '<a 
href="([^"]+)".*?>([^"]+)</a></li>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - -def lista(item): - logger.info("") - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle in matches: - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - diff --git a/channels/porn/webpeliculasporno.json b/channels/porn/webpeliculasporno.json deleted file mode 100644 index a048c974..00000000 --- a/channels/porn/webpeliculasporno.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "webpeliculasporno", - "name": "webpeliculasporno", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://www.webpeliculasporno.com/wp-content/uploads/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/webpeliculasporno.py b/channels/porn/webpeliculasporno.py deleted file mode 100644 index e49a51f6..00000000 --- a/channels/porn/webpeliculasporno.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -import re - 
-import urlparse - -from core import httptools -from core import scrapertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = 'http://www.webpeliculasporno.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host)) - itemlist.append( - Item(channel=item.channel, title="Mas vistas", action="lista", url=host + "/?display=tube&filtre=views")) - itemlist.append( - Item(channel=item.channel, title="Mejor valoradas", action="lista", url=host + "/?display=tube&filtre=rate")) - itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host)) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<li class="border-radius-5 box-shadow">.*?' - patron += 'src="([^"]+)".*?' 
- patron += '<a href="([^"]+)" title="([^"]+)">' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedthumbnail, scrapedurl, scrapedtitle in matches: - url = urlparse.urljoin(item.url, scrapedurl) - title = scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=contentTitle)) - next_page = scrapertools.find_single_match(data, '<li><a class="next page-numbers" href="([^"]+)">Next') - if next_page != "": - next_page = urlparse.urljoin(item.url, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist diff --git a/channels/porn/woodrocket.json b/channels/porn/woodrocket.json deleted file mode 100644 index 02ea74a1..00000000 --- a/channels/porn/woodrocket.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "woodrocket", - "name": "woodrocket", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://woodrocket.com/img//logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/woodrocket.py b/channels/porn/woodrocket.py deleted file mode 100644 index f24640e4..00000000 --- a/channels/porn/woodrocket.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://woodrocket.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host + "/porn")) - itemlist.append( Item(channel=item.channel, title="Parodias" , action="lista", url=host + "/parodies")) - 
itemlist.append( Item(channel=item.channel, title="Shows" , action="categorias", url=host + "/series")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories")) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="media-panel-image">.*?<img src="(.*?)".*?<a href="(.*?)">(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = host + scrapedthumbnail - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="media-panel-image">.*?<a href="([^"]+)".*?title="([^"]+)".*?<img src="([^"]+)"' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - plot = "" - contentTitle = scrapedtitle - thumbnail = urlparse.urljoin(item.url,scrapedthumbnail) - title = scrapedtitle - year = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">»</a></li>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - url = scrapertools.find_single_match(data,'<iframe src="([^"]+)"') - itemlist.append(item.clone(action="play", title= "%s", 
url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/porn/x18hentai.json b/channels/porn/x18hentai.json deleted file mode 100644 index b23b8dd8..00000000 --- a/channels/porn/x18hentai.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "x18hentai", - "name": "18HentaiOnline", - "active": true, - "adult": true, - "language": ["*"], - "banner": "https://s32.postimg.cc/lafs9vgxh/18hentaionline_banner.png", - "thumbnail": "https://s32.postimg.cc/fui7jdg9x/18hentaionline.png", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": false, - "visible": false - } - ] -} \ No newline at end of file diff --git a/channels/porn/x18hentai.py b/channels/porn/x18hentai.py deleted file mode 100644 index db4b37ec..00000000 --- a/channels/porn/x18hentai.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger -from platformcode import config - -host = 'http://www.18hentaionline.net/' -headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], - ['Referer', host]] - - -def mainlist(item): - logger.info() - - itemlist = [] - - itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart='')) - - itemlist.append( - Item(channel=item.channel, title="Sin Censura", action="todas", url=host + '/tag/sin-censura/', thumbnail='', - fanart='')) - - itemlist.append( - Item(channel=item.channel, title="Estrenos", action="todas", url=host + '/category/estreno/', thumbnail='', - fanart='')) - - itemlist.append( - Item(channel=item.channel, title="Categorias", action="categorias", url=host, thumbnail='', fanart='')) - - return 
itemlist - - -def todas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url, headers=headers).data - patron = '<h3><a href="([^"]+)" title="([^"]+)">.*?<\/a><\/h3>.*?' - patron += '<.*?>.*?' - patron += '<a.*?img src="([^"]+)" alt' - - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail in matches: - url = scrapedurl - title = scrapedtitle.decode('utf-8') - thumbnail = scrapedthumbnail - fanart = '' - itemlist.append( - Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=fanart)) - - # Paginacion - title = '' - siguiente = scrapertools.find_single_match(data, - '<a rel="nofollow" class="next page-numbers" href="([^"]+)">Siguiente »<\/a><\/div>') - title = 'Pagina Siguiente >>> ' - fanart = '' - itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart)) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = item.url + texto - - if texto != '': - return todas(item) - else: - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url, headers=headers).data - patron = "<a href='([^']+)' class='tag-link-.*? tag-link-position-.*?' title='.*?' 
style='font-size: 11px;'>([^<]+)<\/a>" - - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle in matches: - url = scrapedurl - title = scrapedtitle - itemlist.append(Item(channel=item.channel, action="todas", title=title, fulltitle=item.fulltitle, url=url)) - - return itemlist - - -def episodios(item): - censura = {'Si': 'con censura', 'No': 'sin censura'} - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url, headers=headers).data - old_mode = scrapertools.find_single_match(data, '<th>Censura<\/th>') - if old_mode: - patron = '<td>(\d+)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td><a href="(.*?)".*?>Ver Capitulo<\/a><\/td>' - - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedcap, scrapedaud, scrapedsub, scrapedcen, scrapedurl in matches: - url = scrapedurl - title = 'CAPITULO ' + scrapedcap + ' AUDIO: ' + scrapedaud + ' SUB:' + scrapedsub + ' ' + censura[scrapedcen] - thumbnail = '' - plot = '' - fanart = '' - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url, - thumbnail=item.thumbnail, plot=plot)) - else: - patron = '<\/i>.*?(.\d+)<\/td><td style="text-align:center">MP4<\/td><td style="text-align:center">(.*?)<\/td>.*?' 
- patron +='<a class="dr-button" href="(.*?)" >' - - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedcap, scrapedsub, scrapedurl in matches: - url = scrapedurl - if scrapedsub !='': - subs= scrapedsub - else: - sub = 'No' - title = 'CAPITULO %s SUB %s'%(scrapedcap, subs) - thumbnail = '' - plot = '' - fanart = '' - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url, - thumbnail=item.thumbnail, plot=plot)) - - return itemlist - -def findvideos(item): - logger.info() - - itemlist = [] - data = httptools.downloadpage(item.url).data - gvideo = scrapertools.find_single_match(data,'<li rel="(http:\/\/www\.18hentaionline\.net\/ramus\/phar\.php\?vid=.*?)">') - headers = {'Host':'www.18hentaionline.net', 'Referer':item.url} - gvideo_data = httptools.downloadpage(gvideo, headers = headers).data - gvideo_url = scrapertools.find_single_match(gvideo_data, 'file: "(.*?)"') - server = 'directo' - new_item = (item.clone(url=gvideo_url, server=server)) - itemlist.append(new_item) - itemlist.extend(servertools.find_video_items(data=data)) - for videoitem in itemlist: - videoitem.channel = item.channel - videoitem.title = item.title+' (%s)'%videoitem.server - videoitem.action = 'play' - return itemlist - - diff --git a/channels/porn/xhamster.json b/channels/porn/xhamster.json deleted file mode 100644 index 54d9f88c..00000000 --- a/channels/porn/xhamster.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "xhamster", - "name": "xhamster", - "active": true, - "adult": true, - "language": ["*"], - "banner": "xhamster.png", - "thumbnail": "xhamster.png", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/channels/porn/xhamster.py b/channels/porn/xhamster.py deleted file mode 100644 index c9020a97..00000000 --- 
a/channels/porn/xhamster.py +++ /dev/null @@ -1,155 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import sys -import urlparse - -from platformcode import logger -from core import scrapertools, httptools -from core.item import Item - -HOST = "http://es.xhamster.com/" - - -def mainlist(item): - logger.info() - - itemlist = [] - itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=HOST, viewmode="movie")) - itemlist.append(Item(channel=item.channel, action="categorias", title="Categorías", url=HOST)) - itemlist.append(Item(channel=item.channel, action="votados", title="Lo mejor")) - itemlist.append(Item(channel=item.channel, action="vistos", title="Los mas vistos")) - itemlist.append(Item(channel=item.channel, action="videos", title="Recomendados", - url=urlparse.urljoin(HOST, "/videos/recommended"))) - itemlist.append( - Item(channel=item.channel, action="search", title="Buscar", url=urlparse.urljoin(HOST, "/search?q=%s"))) - - return itemlist - - -# REALMENTE PASA LA DIRECCION DE BUSQUEDA - - -def search(item, texto): - logger.info() - tecleado = texto.replace(" ", "+") - item.url = item.url % tecleado - item.extra = "buscar" - try: - return videos(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -# SECCION ENCARGADA DE BUSCAR - - -def videos(item): - logger.info() - data = httptools.downloadpage(item.url).data - itemlist = [] - - data = scrapertools.find_single_match(data, '<article.+?>(.*?)</article>') - - # Patron - patron = '(?s)<div class="thumb-list__item.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)">.*?' 
- patron += '<div class="thumb-image-container__duration">(.+?)</div>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedthumbnail, scrapedtitle, duration in matches: - # logger.debug("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") - contentTitle = scrapedtitle.strip() + " [" + duration + "]" - itemlist.append( - Item(channel=item.channel, action="play", title=contentTitle, url=scrapedurl, thumbnail=scrapedthumbnail, - folder=True)) - - # Paginador - patron = '(?s)<div class="pager-container".*?<li class="next">.*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) > 0: - itemlist.append( - Item(channel=item.channel, action="videos", title="Página Siguiente", url=matches[0], thumbnail="", - folder=True, viewmode="movie")) - - return itemlist - - -# SECCION ENCARGADA DE VOLCAR EL LISTADO DE CATEGORIAS CON EL LINK CORRESPONDIENTE A CADA PAGINA - - -def categorias(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - - data = scrapertools.find_single_match(data, '(?s)<div class="all-categories">(.*?)</aside>') - - patron = '(?s)<li>.*?<a href="([^"]+)".*?>([^<]+).*?</a></li>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - contentTitle = scrapedtitle.strip() - itemlist.append(Item(channel=item.channel, action="videos", title=contentTitle, url=scrapedurl)) - - return itemlist - - -def votados(item): - logger.info() - itemlist = [] - - itemlist.append(Item(channel=item.channel, action="videos", title="Día", url=urlparse.urljoin(HOST, "/best/daily"), - viewmode="movie")) - itemlist.append( - Item(channel=item.channel, action="videos", title="Semana", url=urlparse.urljoin(HOST, "/best/weekly"), - viewmode="movie")) - itemlist.append( - Item(channel=item.channel, action="videos", title="Mes", url=urlparse.urljoin(HOST, "/best/monthly"), - viewmode="movie")) - itemlist.append( - 
Item(channel=item.channel, action="videos", title="De siempre", url=urlparse.urljoin(HOST, "/best/"), - viewmode="movie")) - return itemlist - - -def vistos(item): - logger.info() - itemlist = [] - - itemlist.append( - Item(channel=item.channel, action="videos", title="Día", url=urlparse.urljoin(HOST, "/most-viewed/daily"), - viewmode="movie")) - itemlist.append( - Item(channel=item.channel, action="videos", title="Semana", url=urlparse.urljoin(HOST, "/most-viewed/weekly"), - viewmode="movie")) - itemlist.append( - Item(channel=item.channel, action="videos", title="Mes", url=urlparse.urljoin(HOST, "/most-viewed/monthly"), - viewmode="movie")) - itemlist.append( - Item(channel=item.channel, action="videos", title="De siempre", url=urlparse.urljoin(HOST, "/most-viewed/"), - viewmode="movie")) - - return itemlist - - -# OBTIENE LOS ENLACES SEGUN LOS PATRONES DEL VIDEO Y LOS UNE CON EL SERVIDOR -def play(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - logger.debug(data) - - patron = '"([0-9]+p)":"([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - - for res, url in matches: - url = url.replace("\\", "") - logger.debug("url=" + url) - itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url]) - - return itemlist diff --git a/channels/porn/xms.json b/channels/porn/xms.json deleted file mode 100644 index 64e505cd..00000000 --- a/channels/porn/xms.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "id": "xms", - "name": "XMS", - "active": true, - "adult": true, - "language": ["*"], - "fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg", - "thumbnail": "https://i.postimg.cc/wB0NsMTX/xms.png", - "banner": "https://i.postimg.cc/c6yh5C3K/xmsbn.png", - "categories": [ - "adult" - ], - "settings": [ - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - 
"id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 3, - "enabled": true, - "visible": true, - "lvalues": [ - "Sin color", - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - } - ] -} diff --git a/channels/porn/xms.py b/channels/porn/xms.py deleted file mode 100644 index 7fa51198..00000000 --- a/channels/porn/xms.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse -import base64 - -from core import channeltools -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger - -__channel__ = "xms" - -host = 'https://xtheatre.org/' -host1 = 'https://www.cam4.com/' -try: - __modo_grafico__ = config.get_setting('modo_grafico', __channel__) - __perfil__ = int(config.get_setting('perfil', __channel__)) -except: - __modo_grafico__ = True - __perfil__ = 0 - -# Fijar perfil de color -perfil = [['0xFF6E2802', '0xFFFAA171', '0xFFE9D7940'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], - ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']] - -if __perfil__ - 1 >= 0: - color1, color2, color3 = perfil[__perfil__ - 1] -else: - color1 = color2 = color3 = "" - -headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], - ['Referer', host]] - -parameters = channeltools.get_channel_parameters(__channel__) -fanart_host = parameters['fanart'] -thumbnail_host = parameters['thumbnail'] -thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/%s.png' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append(Item(channel=__channel__, title="Últimas", url=host + '?filtre=date&cat=0', - action="peliculas", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=thumbnail % '1')) - - itemlist.append(Item(channel=__channel__, title="Más Vistas", url=host + '?display=extract&filtre=views', - action="peliculas", viewmode="movie_with_plot", 
viewcontent='movies', - thumbnail=thumbnail % '2')) - - itemlist.append(Item(channel=__channel__, title="Mejor Valoradas", url=host + '?display=extract&filtre=rate', - action="peliculas", viewmode="movie_with_plot", viewcontent='movies', - thumbnail=thumbnail % '3')) - - itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias", - url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies', - thumbnail=thumbnail % '4')) - - itemlist.append(Item(channel=__channel__, title="WebCam", action="webcamenu", - viewmode="movie_with_plot", viewcontent='movies', - thumbnail='https://ae01.alicdn.com/kf/HTB1LDoiaHsrBKNjSZFpq6AXhFXa9/-.jpg')) - - itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, thumbnail=thumbnail % '5')) - - return itemlist - - -def webcamenu(item): - logger.info() - itemlist = [item.clone(title="Trending Cams", action="webcam", text_blod=True, url=host1, - viewcontent='movies', viewmode="movie_with_plot"), - item.clone(title="Females", action="webcam", text_blod=True, - viewcontent='movies', url=host1 + 'female', viewmode="movie_with_plot"), - item.clone(title="Males", action="webcam", text_blod=True, - viewcontent='movies', url=host1 + 'male', viewmode="movie_with_plot"), - item.clone(title="Couples", action="webcam", text_blod=True, - viewcontent='movies', url=host1 + 'couple', viewmode="movie_with_plot"), - item.clone(title="Trans", action="webcam", text_blod=True, extra="Películas Por año", - viewcontent='movies', url=host1 + 'transgender', viewmode="movie_with_plot")] - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data) - patron = 'src="([^"]+)" class="attachment-thumb_site.*?' # img - patron += '<a href="([^"]+)" title="([^"]+)".*?' 
# url, title - patron += '<div class="right"><p>([^<]+)</p>' # plot - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches: - plot = scrapertools.decodeHtmlentities(plot) - - itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle.capitalize(), - url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, - fanart=scrapedthumbnail,viewmode="movie_with_plot", - folder=True, contentTitle=scrapedtitle)) - # Extrae el paginador - paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next ›</a></li><li>') - paginacion = urlparse.urljoin(item.url, paginacion) - - if paginacion: - itemlist.append(Item(channel=__channel__, action="peliculas", - thumbnail=thumbnail % 'rarrow', - title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) - - return itemlist - - -def webcam(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data) - patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?' # url - patron += 'data-hls-preview-url="([^"]+)">.*?' # video_url - patron += 'data-username="([^"]+)".*?' # username - patron += 'title="([^"]+)".*?' 
# title - patron += 'data-profile="([^"]+)"' # img - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches: - scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '') - - itemlist.append(item.clone(channel=__channel__, action="play", title=username, - url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, - viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle)) - # Extrae el paginador - paginacion = scrapertools.find_single_match(data, '<span id="pagerSpan">\d+</span> <a href="([^"]+)"') - paginacion = urlparse.urljoin(item.url, paginacion) - - if paginacion: - itemlist.append(Item(channel=__channel__, action="webcam", - thumbnail=thumbnail % 'rarrow', - title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) - - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = 'data-lazy-src="([^"]+)".*?' - patron += '<a href="([^"]+)".*?' - patron += '<span>([^<]+)</span></a>.*?' 
- patron += '<span class="nb_cat border-radius-5">([^<]+)</span>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedthumbnail, scrapedurl, scrapedtitle, vids in matches: - title = "%s (%s)" % (scrapedtitle, vids.title()) - itemlist.append(item.clone(channel=__channel__, action="peliculas", fanart=scrapedthumbnail, - title=title, url=scrapedurl, thumbnail=scrapedthumbnail, - viewmode="movie_with_plot", folder=True)) - - return itemlist - - -def search(item, texto): - logger.info() - - texto = texto.replace(" ", "+") - item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) - - try: - return sub_search(item) - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - -def sub_search(item): - logger.info() - - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = 'data-lazy-src="([^"]+)".*?' # img - patron += 'title="([^"]+)" />.*?' # title - patron += '</noscript><a href="([^"]+)".*?' 
# url - patron += '<div class="right"><p>([^<]+)</p>' # plot - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches: - itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail, - action="play", thumbnail=scrapedthumbnail)) - - paginacion = scrapertools.find_single_match( - data, "<a href='([^']+)' class=\"inactive\">\d+</a>") - - if paginacion: - itemlist.append(item.clone(channel=__channel__, action="sub_search", - title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion)) - - return itemlist - - -def play(item): - itemlist = [] - if "playlist.m3u8" in item.url: - url = item.url - else: - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = 'src="([^"]+)" allowfullscreen="true">' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - if "strdef" in url: - url = decode_url(url) - if "strdef" in url: - url = httptools.downloadpage(url).url - itemlist.append(item.clone(action="play", title= "%s", url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - - -def decode_url(txt): - logger.info() - itemlist = [] - data = httptools.downloadpage(txt).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - rep = True - while rep == True: - b64_data = scrapertools.find_single_match(data, '\(dhYas638H\("([^"]+)"\)') - if b64_data: - b64_url = base64.b64decode(b64_data + "=") - b64_url = base64.b64decode(b64_url + "==") - data = b64_url - else: - rep = False - url = scrapertools.find_single_match(b64_url, '<iframe src="([^"]+)"') - logger.debug (url) - return url \ No newline at end of file diff --git a/channels/porn/xozilla.json b/channels/porn/xozilla.json deleted file mode 100644 index aae90940..00000000 --- a/channels/porn/xozilla.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "xozilla", - "name": "xozilla", - "active": 
true, - "adult": true, - "language": ["*"], - "thumbnail": "https://www.xozilla.com/images/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/xozilla.py b/channels/porn/xozilla.py deleted file mode 100644 index 4b0b3d46..00000000 --- a/channels/porn/xozilla.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -import urlparse -import urllib2 -import urllib -import re -import os -import sys - -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.xozilla.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/latest-updates/")) - itemlist.append(Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/")) - itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated/")) - - itemlist.append(Item(channel=item.channel, title="PornStar", action="categorias", url=host + "/models/")) - itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/")) - itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/")) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s/" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?' 
- patron += '<img class="thumb" src="([^"]+)".*?' - patron += '(.*?)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle, scrapedthumbnail, cantidad in matches: - scrapedplot = "" - cantidad = scrapertools.find_single_match(cantidad, '(\d+) videos</div>') - if cantidad: - scrapedtitle += " (" + cantidad + ")" - itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot)) - if "Categorias" in item.title: - itemlist.sort(key=lambda x: x.title) - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"') - if next_page != "#videos": - next_page = urlparse.urljoin(item.url, next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page)) - if next_page == "#videos": - next_page = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>') - next_page = urlparse.urljoin(item.url, next_page) + "/" - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" class="item.*?' - patron += 'data-original="([^"]+)".*?' - patron += 'alt="([^"]+)".*?' 
- patron += '<div class="duration">(.*?)</div>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches: - url = scrapedurl - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - year = "" - itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle=contentTitle)) - next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"') - if next_page != "#videos": - next_page = urlparse.urljoin(item.url, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - if next_page == "#videos": - next_page = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>') - next_page = urlparse.urljoin(item.url, next_page) + "/" - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - media_url = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)/\'') - if media_url == "": - media_url = scrapertools.find_single_match(data, 'video_url: \'([^\']+)/\'') - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=media_url, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist diff --git a/channels/porn/xtapes.json b/channels/porn/xtapes.json deleted file mode 100644 index 017437cb..00000000 --- a/channels/porn/xtapes.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "xtapes", - "name": "xtapes", - "active": false, - "adult": true, - "language": ["*"], - "thumbnail": "http://hd.xtapes.to/wp-content/uploads/xtapes.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git 
a/channels/porn/xtapes.py b/channels/porn/xtapes.py deleted file mode 100644 index 4bb3548b..00000000 --- a/channels/porn/xtapes.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://hd.xtapes.to' - -# Links NetuTV - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/hd-porn-movies/")) - itemlist.append( Item(channel=item.channel, title="Productora" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?filtre=date&cat=0")) - itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="lista", url=host + "/?display=tube&filtre=views")) - itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="lista", url=host + "/?display=tube&filtre=rate")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/?display=tube&filtre=duree")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if item.title=="Canal": - data = 
scrapertools.find_single_match(data,'<div class="footer-banner">(.*?)<div id="footer-copyright">') - if item.title=="Productora" : - data = scrapertools.find_single_match(data,'>Full Movies</a>(.*?)</ul>') - if item.title=="Categorias" : - data = scrapertools.find_single_match(data,'<a>Categories</a>(.*?)</ul>') - patron = '<a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li class="border-radius-5 box-shadow">.*?' - patron += 'src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?' 
- patron += '<div class="time-infos".*?>([^"]+)<span class="time-img">' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next video') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - next_page = next_page.replace("#038;cat=0#038;", "") - next_page = next_page.replace("#038;filtre=views#038;", "").replace("&filtre=rate#038;", "&").replace("#038;filtre=duree#038;", "") - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - diff --git a/channels/porn/xvideos.json b/channels/porn/xvideos.json deleted file mode 100755 index ddcc89d5..00000000 --- a/channels/porn/xvideos.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "xvideos", - "name": "xvideos", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "xvideos.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/xvideos.py b/channels/porn/xvideos.py deleted file mode 100755 index 6c66b18c..00000000 --- a/channels/porn/xvideos.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.xvideos.com' - - -def mainlist(item): - 
logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Lo mejor" , action="lista", url=host + "/best/")) - itemlist.append( Item(channel=item.channel, title="Pornstar" , action="catalogo", url=host + "/pornstars-index")) - itemlist.append( Item(channel=item.channel, title="WebCAM" , action="catalogo", url=host + "/webcam-models-index")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-index/top")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/tags")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?k=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<li><a href="([^"]+)"><b>([^<]+)</b><span class="navbadge default">([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - title = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<img src="([^"]+)".*?' - patron += '<p class="profile-name">.*?<a href="([^"]+)">([^<]+)</a>.*?' 
- patron += '<span class="with-sub">([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedurl = urlparse.urljoin(host,scrapedurl) + "/videos/new/0" - title = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)" class="no-page next-page">Siguiente') - if next_page=="": - next_page = scrapertools.find_single_match(data, '<li><a class="active".*?<a href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<div id="video_\d+".*?' - patron += 'data-src="([^"]+)".*?' - patron += '</a>(.*?)<div class=.*?' - patron += '<a href="([^"]+)" title="([^"]+)".*?' 
- patron += '<span class="duration">([^<]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,quality,scrapedurl,scrapedtitle,scrapedtime in matches: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - thumbnail = scrapedthumbnail.replace("THUMBNUM" , "10") - quality = scrapertools.find_single_match(quality, 'mark">([^<]+)</span>') - if quality: - title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + "[COLOR red]" + quality + "[/COLOR] " + scrapedtitle - plot = "" - itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, - thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle = scrapedtitle)) - next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)" class="no-page next-page">Siguiente') - if "profile" in item.url: - next_page = scrapertools.find_single_match(data, '<li><a class="active" href="">(\d+)</a></li><li><a href="#') - if next_page: - next_page = urlparse.urljoin(item.url,next_page).replace("&", "&") - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - url = scrapertools.find_single_match(data, 'html5player.setVideoHLS\(\'([^\']+)\'\)') - itemlist.append(item.clone(action="play", title=url, url=url )) - return itemlist - diff --git a/channels/porn/xxxdan.json b/channels/porn/xxxdan.json deleted file mode 100644 index 4636a2f1..00000000 --- a/channels/porn/xxxdan.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "xxxdan", - "name": "xxxdan", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://s0.cdn3x.com/xxxdan/i/logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - - diff --git 
a/channels/porn/xxxdan.py b/channels/porn/xxxdan.py deleted file mode 100644 index c4d21fd0..00000000 --- a/channels/porn/xxxdan.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys - -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'http://xxxdan.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest")) - itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular30")) - itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest")) - itemlist.append( Item(channel=item.channel, title="HD" , action="lista", url=host + "/channel30/hd")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search?query=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" rel="tag".*?' - patron += 'title="([^"]+)".*?' - patron += 'data-original="([^"]+)".*?' 
- patron += '<span class="score">(\d+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedurl = scrapedurl.replace("channel", "channel30") - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , fanart=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - - - patron = '<li><figure>\s*<a href="([^"]+)".*?' - patron += 'data-original="([^"]+)".*?' - patron += '<time datetime="\w+">([^"]+)</time>' - patron += '(.*?)</ul>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,duracion,calidad in matches: - url = scrapedurl - scrapedtitle = scrapertools.find_single_match(scrapedurl,'https://xxxdan.com/es/.*?/(.*?).html') - contentTitle = scrapedtitle - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - if '<li class="hd">' in calidad : - title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">→</a>') - if next_page!="": - next_page = next_page.replace("http://xxxdan.com/","") - next_page = "/" + next_page - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - media_url = 
scrapertools.find_single_match(data, 'src:\'([^\']+)\'') - media_url = media_url.replace("https","http") - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=media_url, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist - diff --git a/channels/porn/xxxfreeinhd.json b/channels/porn/xxxfreeinhd.json deleted file mode 100644 index ff580284..00000000 --- a/channels/porn/xxxfreeinhd.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "id": "xxxfreeinhd", - "name": "xxxfreeinhd", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://watchxxxfreeinhd.com/wp-content/uploads/logo2015-1.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - ] -} - diff --git a/channels/porn/xxxfreeinhd.py b/channels/porn/xxxfreeinhd.py deleted file mode 100644 index ff12d4e1..00000000 --- a/channels/porn/xxxfreeinhd.py +++ /dev/null @@ -1,122 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -import base64 -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://watchxxxfreeinhd.com' - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/?filtre=date&cat=0")) - itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/?display=tube&filtre=views")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/?display=tube&filtre=rate")) - - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - 
logger.info() - texto = texto.replace(" ", "+") - item.url = host + "search.php?q=%s&language=en&search=Search" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<noscript>.*?src="([^"]+)".*?' - patron += '<a href="([^"]+)" title="([^"]+)".*?' - patron += '<span class="nb_cat border-radius-5">(\d+) videos</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle,cantidad in matches: - scrapedplot = "" - title = scrapedtitle + " (" + cantidad + ")" - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data) - patron = '<li class="border-radius-5 box-shadow">.*?' - patron += '<img width="\d+" height="\d+" src="([^"]+)" class=.*?' - patron += '<a href="([^"]+)" title="([^"]+)">.*?' 
- matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle in matches: - title = scrapedtitle - thumbnail = scrapedthumbnail + "|https://watchxxxfreeinhd.com/" - plot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, - thumbnail=thumbnail, plot=plot, fanart=scrapedthumbnail )) - next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)"') - if next_page: - next_page = urlparse.urljoin(item.url,next_page) - if "?filtre=date&cat=0" in item.url: next_page += "?filtre=date&cat=0" - elif "?display=tube&filtre=views" in item.url: next_page += "?display=tube&filtre=views" - elif "?display=tube&filtre=rate" in item.url: next_page += "?display=tube&filtre=rate" - itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - data = scrapertools.find_single_match(data,'<div class="video-embed">(.*?)<div class="views-infos">') - patron = 'data-lazy-src="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for title in matches: - if "strdef" in title: - url = decode_url(title) - if "strdef" in url: - url = httptools.downloadpage(url).url - if "hqq" in title: - url = title - itemlist.append( Item(channel=item.channel, action="play", title = "%s", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - - -def decode_url(txt): - logger.info() - itemlist = [] - data = httptools.downloadpage(txt).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - rep = True - while rep == True: - b64_data = scrapertools.find_single_match(data, '\(dhYas638H\("([^"]+)"\)') - if b64_data: - b64_url = base64.b64decode(b64_data + "=") - b64_url = 
base64.b64decode(b64_url + "==") - data = b64_url - else: - rep = False - url = scrapertools.find_single_match(b64_url, '<iframe src="([^"]+)"') - logger.debug (url) - return url - - diff --git a/channels/porn/xxxparodyhd.json b/channels/porn/xxxparodyhd.json deleted file mode 100644 index 54b05121..00000000 --- a/channels/porn/xxxparodyhd.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "xxxparodyhd", - "name": "xxxparodyhd", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://xxxparodyhd.net/wp-content/uploads/2018/04/parodyhd-1.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/xxxparodyhd.py b/channels/porn/xxxparodyhd.py deleted file mode 100644 index df554687..00000000 --- a/channels/porn/xxxparodyhd.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'https://xxxparodyhd.net' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/genre/clips-scenes/")) - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies/")) - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/genre/new-release/")) - itemlist.append( Item(channel=item.channel, title="Parodias" , action="lista", url=host + "/genre/parodies/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - 
logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if item.title == "Canal" : - data = scrapertools.find_single_match(data,'>Studios</a>(.*?)</ul>') - else: - data = scrapertools.find_single_match(data,'>Categories</a>(.*?)</ul>') - patron = '<a href="([^"]+)">([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div data-movie-id="\d+" class="ml-item">.*?' - patron += '<a href="([^"]+)".*?' - patron += 'oldtitle="([^"]+)".*?' 
- patron += '<img src="([^"]+)".*?rel="tag">(.*?)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedyear in matches: - scrapedplot = "" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot, infoLabels={'year':scrapedyear}) ) - next_page = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - diff --git a/channels/porn/xxxstreams.json b/channels/porn/xxxstreams.json deleted file mode 100644 index a92df9d3..00000000 --- a/channels/porn/xxxstreams.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "xxxstreams", - "name": "xxxstreams", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "xxxstreams.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/xxxstreams.py b/channels/porn/xxxstreams.py deleted file mode 100644 index cff4d613..00000000 --- a/channels/porn/xxxstreams.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://xxxstreams.org' #es hhttp://freepornstreams.org - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url= host + "/category/full-porn-movie-stream/")) - itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/new-porn-streaming/")) - itemlist.append( 
Item(channel=item.channel, title="Canal" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if item.title == "Categorias" : - data1 = scrapertools.find_single_match(data,'>Top Tags</a>(.*?)</ul>') - data1 += scrapertools.find_single_match(data,'>Ethnic</a>(.*?)</ul>') - data1 += scrapertools.find_single_match(data,'>Kinky</a>(.*?)</ul>') - if item.title == "Canal" : - data1 = scrapertools.find_single_match(data,'>Top sites</a>(.*?)</ul>') - data1 += scrapertools.find_single_match(data,'Downloads</h2>(.*?)</ul>') - patron = '<a href="([^<]+)">([^<]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data1) - for scrapedurl,scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="entry-content">.*?' - patron += '<img src="([^"]+)".*?' - patron += '<a href="([^<]+)".*?' 
- patron += '<span class="screen-reader-text">(.*?)</span>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedthumbnail,scrapedurl,scrapedtitle in matches: - scrapedplot = "" - if '/HD' in scrapedtitle : title= "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle - elif 'SD' in scrapedtitle : title= "[COLOR red]" + "SD" + "[/COLOR] " + scrapedtitle - elif 'FullHD' in scrapedtitle : title= "[COLOR red]" + "FullHD" + "[/COLOR] " + scrapedtitle - elif '1080' in scrapedtitle : title= "[COLOR red]" + "1080p" + "[/COLOR] " + scrapedtitle - else: title = scrapedtitle - itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail,plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next →</a>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista" , title="Next page >>", text_color="blue", url=next_page) ) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data) - patron = '<a href="([^"]+)" rel="nofollow"[^<]+>(?:Streaming|Download)' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - if not "ubiqfile" in url: - itemlist.append(item.clone(action='play',title="%s", url=url)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist diff --git a/channels/porn/yespornplease.json b/channels/porn/yespornplease.json deleted file mode 100644 index f9994183..00000000 --- a/channels/porn/yespornplease.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "id": "yespornplease", - "name": "YesPornPlease", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "yespornplease.png", - "banner": "yespornplease.png", - "categories": [ - "adult" - ], - "settings": [ - { - 
"id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/channels/porn/yespornplease.py b/channels/porn/yespornplease.py deleted file mode 100644 index ae7c2b40..00000000 --- a/channels/porn/yespornplease.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import httptools -from core.item import Item -from platformcode import logger -from urlparse import urljoin -from core import servertools - - -HOST="http://yespornplease.com" - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(item.clone(action="links", title="Novedades", url=HOST)) - itemlist.append(item.clone(action="categories", title="Categorías", url=urljoin(HOST, "categories"))) - itemlist.append(item.clone(action="search", title="Buscar", url=urljoin(HOST, "search"))) - return itemlist - - -def search(item, texto): - logger.info("texto = %s" %(texto)) - item.url = urljoin(HOST, "search?q=" + texto) - try: - return links(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categories(item): - logger.info() - data = httptools.downloadpage(item.url).data - result = [] - categories = re.findall("href=[\"'](?P<url>/search[^\"']+).*?>(?P<name>[^<>]+)</div>.*?badge[^>]+>(?P<counter>\d+)", data, re.DOTALL | re.MULTILINE) - for url, name, counter in categories: - result.append(item.clone(action = "links", title = "%s (%s videos)" % (name, counter), url = urljoin(item.url, url))) - return result - - -def get_page(url): - page = re.search("p=(\d+)", url) - if page: - return int(page.group(1)) - return 1 - - -def get_page_url(url, page): - logger.debug("URL: %s to page %d" % (url, page)) - resultURL = re.sub("([&\?]p=)(?:\d+)", "\g<1>%d" % page, url) - if resultURL == url: - resultURL 
+= ("&" if "?" in url else "?") + "p=%d" % (page) - logger.debug("Result: %s" % (resultURL)) - return resultURL - - -def links(item): - logger.info() - data = httptools.downloadpage(item.url).data - reExpr = "<img\s+src=['\"](?P<img>[^'\"]+)[^>]+(?:title|alt)[^'\"]*['\"](?P<title>[^\"]+)[^>]+id[^'\"]*['\"](?P<id>[^'\"]+)[^>]*>(?:[^<]*<[^>]+>(?P<quality>[^<]+)<)?[^<]*<[^>]*duration[^>]*>(?P<duration>[^<]+)" - reResults = re.findall(reExpr, data, re.MULTILINE | re.DOTALL) - result = [] - for img, title, vID, quality, duration in reResults: - formattedQuality = "" - if quality: - formattedQuality += " [%s]" % (quality) - titleFormatted = "%(title)s%(quality)s [%(duration)s]" % ({"title": title, "quality": formattedQuality, "duration": duration}) - result.append(item.clone(action = "play", title = titleFormatted, url = urljoin(item.url, "/v/%s" % (vID)), thumbnail = urljoin(item.url, img), vID = vID)) - # Has pagination - paginationOccurences = data.count('class="prevnext"') - if paginationOccurences: - page = get_page(item.url) - logger.info("Page " + str(page) + " Ocurrences: " + str(paginationOccurences)) - if page > 1: - result.append(item.clone(action = "links", title = "<< Anterior", url = get_page_url(item.url, page - 1))) - if paginationOccurences > 1 or page == 1: - result.append(item.clone(action = "links", title = "Siguiente >>", url = get_page_url(item.url, page + 1))) - return result - - -def play(item): - logger.info(item) - embededURL = urljoin(item.url, "/v/%s" % (item.vID)) - itemlist = servertools.find_video_items(item.clone(url = embededURL)) - return itemlist diff --git a/channels/porn/youjizz.json b/channels/porn/youjizz.json deleted file mode 100644 index cdc171b9..00000000 --- a/channels/porn/youjizz.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "youjizz", - "name": "youjizz", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://cdne-static.yjcontentdelivery.com/app/1/images/yjlogo.jpeg", - "banner": "", - 
"categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/youjizz.py b/channels/porn/youjizz.py deleted file mode 100644 index 4112acbe..00000000 --- a/channels/porn/youjizz.py +++ /dev/null @@ -1,111 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -import urlparse -import urllib2 -import urllib -import re -import os -import sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.youjizz.com' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/newest-clips/1.html")) - itemlist.append(Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/1.html")) - itemlist.append( - Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated-week/1.html")) - itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host)) - itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/%s-1.html" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, '>Trending Categories<(.*?)</ul>') - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<li><a href="([^"]+)">([^"]+)</a>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - scrapedplot = "" - scrapedthumbnail = "" - scrapedtitle = scrapedtitle - scrapedurl = urlparse.urljoin(item.url, scrapedurl) - 
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot)) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<div class="video-item">.*?' - patron += 'class="frame image" href="([^"]+)".*?' - patron += 'data-original="([^"]+)" />.*?' - patron += '<div class="video-title">.*?' - patron += '>(.*?)</a>.*?' - patron += '<span class="time">(.*?)</span>' - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches: - url = urlparse.urljoin(item.url, scrapedurl) - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - quality = "" - if '-720-' in scrapedthumbnail: - quality = "720" - if '-1080-' in scrapedthumbnail: - quality = "1080" - if quality: - title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = "http:" + scrapedthumbnail - plot = "" - year = "" - itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, - plot=plot, quality=quality, contentTitle=contentTitle)) - next_page = scrapertools.find_single_match(data, '<li><a class="pagination-next" href="([^"]+)">Next »</a>') - if next_page != "": - next_page = urlparse.urljoin(item.url, next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page)) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, 'var encodings(.*?)var') - if '360' in data: - patron = '"360".*?"filename"\:"(.*?)"' - if '720' in data: - patron = '"720".*?"filename"\:"(.*?)"' - if '1080' in data: - patron = '"1080".*?"filename"\:"(.*?)"' - media_url = 
scrapertools.find_single_match(data, patron) - media_url = "https:" + media_url.replace("\\", "") - itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=media_url, - thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) - return itemlist diff --git a/channels/porn/youporn.json b/channels/porn/youporn.json deleted file mode 100644 index ed466088..00000000 --- a/channels/porn/youporn.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "youporn", - "name": "youporn", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "https://fs.ypncdn.com/cb/bundles/youpornwebfront/images/l_youporn_black.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/youporn.py b/channels/porn/youporn.py deleted file mode 100644 index fd41cfb7..00000000 --- a/channels/porn/youporn.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from platformcode import config, logger -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools - -host = 'https://www.youporn.com' - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/browse/time/")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/browse/views/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top_rated/")) - itemlist.append( Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/most_popular/")) - itemlist.append( Item(channel=item.channel, title="Pornstars", action="catalogo", url=host + "/pornstars/most_popular/")) - itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + 
"/categories/alphabetical/")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host + "/search/?query=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def catalogo(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - data1 = scrapertools.find_single_match(data,'>Most Popular Pornstars<(.*?)<i class=\'icon-menu-right\'></i></a>') - patron = '<a href="([^"]+)".*?' - patron += 'data-original="([^"]+)".*?' - patron += '<span class="porn-star-name">([^"]+)</span>.*?' - patron += '<span class="video-count">([^"]+)</span>' - matches = re.compile(patron,re.DOTALL).findall(data1) - for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches: - scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - if item.title == "Canal": - data = scrapertools.find_single_match(data,'>All</div>(.*?)<i class=\'icon-menu-right\'></i></a>') - if item.title == "Categorias": - data = scrapertools.find_single_match(data,'<div class=\'row alphabetical\'.*?>(.*?)>Popular by Country</h2>') - patron = '<a 
href="([^"]+)".*?' - patron += '<img src=(.*?)>.*?' - patron += '>([^<]+) (?:Videos|videos)<' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,cantidad in matches: - scrapedplot = "" - thumbnail = scrapertools.find_single_match(scrapedthumbnail,'data-original="([^"]+)"') - scrapedtitle = scrapertools.find_single_match(scrapedthumbnail,'alt="([^"]+)"') - if scrapedtitle == "" : - scrapedtitle = scrapertools.find_single_match(scrapedthumbnail,'alt=\'([^\']+)\'') - title = scrapedtitle + " (" + cantidad +")" - scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="lista", title=title, url=scrapedurl, - fanart=thumbnail, thumbnail=thumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<div class="currentPage".*?<a href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - patron = '<a href="([^"]+)" class=\'video-box-image\'.*?' - patron += 'data-original="([^"]+)".*?' - patron += '<div class="video-box-title">([^"]+)</div>.*?' 
- patron += '<div class="video-duration">(.*?)</div>' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches: - url = urlparse.urljoin(item.url,scrapedurl) - title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle - contentTitle = title - thumbnail = scrapedthumbnail - plot = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, - fanart=thumbnail, plot=plot, contentTitle = contentTitle)) - next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - return itemlist - - -def play(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = 'page_params.video.mediaDefinition =.*?"videoUrl":"([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl in matches: - scrapedurl = scrapedurl.replace("\/", "/") - itemlist.append(item.clone(action="play", title=scrapedurl, url=scrapedurl)) - return itemlist - - diff --git a/channels/porn/yuuk.json b/channels/porn/yuuk.json deleted file mode 100644 index 89900cea..00000000 --- a/channels/porn/yuuk.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "yuuk", - "name": "yuuk", - "active": true, - "adult": true, - "language": ["*"], - "thumbnail": "http://yuuk.net/wp-content/uploads/2018/06/yuuk_net_logo.png", - "banner": "", - "categories": [ - "adult" - ], - "settings": [ - - ] -} - diff --git a/channels/porn/yuuk.py b/channels/porn/yuuk.py deleted file mode 100644 index f6e08673..00000000 --- a/channels/porn/yuuk.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -#------------------------------------------------------------ -import urlparse,urllib2,urllib,re -import os, sys -from core import scrapertools -from core import 
servertools -from core.item import Item -from platformcode import config, logger -from core import httptools - -host = 'http://yuuk.net' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host)) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-genres/")) - itemlist.append( Item(channel=item.channel, title="Buscar" , action="search")) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = host+ "/?s=%s" % texto - try: - return lista(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def categorias(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - itemlist.append( Item(channel=item.channel, title="Censored" , action="lista", url=host + "/category/censored/")) - itemlist.append( Item(channel=item.channel, title="Uncensored" , action="lista", url=host + "/category/uncensored/")) - patron = '<li><a href="([^"]+)" title="[^"]+"><span>([^"]+)</span><span>([^"]+)</span></a></li>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,cantidad in matches: - scrapedtitle = scrapedtitle + " (" + cantidad + ")" - scrapedplot = "" - scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot) ) - return itemlist - - -def lista(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '<div class="featured-wrap clearfix">.*?' - patron += '<a href="([^"]+)" title="([^"]+)".*?' - patron += 'src="([^"]+)".*?' 
- patron += '>#([^"]+)</a>' - matches = re.compile(patron,re.DOTALL).findall(data) - scrapertools.printMatches(matches) - for scrapedurl,scrapedtitle,scrapedthumbnail,calidad in matches: - scrapedplot = "" - calidad = calidad.replace(" Full HD JAV", "") - scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) - next_page = scrapertools.find_single_match(data,'<li class=\'current\'>.*?<a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) - - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - data = scrapertools.find_single_match(data,'Streaming Server<(.*?)Screenshot<') - patron = '(?:src|SRC)="([^"]+)"' - matches = scrapertools.find_multiple_matches(data, patron) - for url in matches: - if "http://stream.yuuk.net/embed.php" in url: - data = httptools.downloadpage(url).data - url = scrapertools.find_single_match(data,'"file": "([^"]+)e=download"') - itemlist.append( Item(channel=item.channel, action="play", title = "%s", url=url )) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - return itemlist - diff --git a/channels/pufimovies.json b/channels/pufimovies.json index 5ded05f3..91680914 100644 --- a/channels/pufimovies.json +++ b/channels/pufimovies.json @@ -2,7 +2,6 @@ "id": "pufimovies", "name": "PufiMovies", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "pufimovies.png", "banner": "pufimovies.png", diff --git a/channels/raiplay.json b/channels/raiplay.json index cf69a5b1..569776ca 100644 --- 
a/channels/raiplay.json +++ b/channels/raiplay.json @@ -2,7 +2,6 @@ "id": "raiplay", "name": "Rai Play", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "raiplay.png", "banner": "raiplay.png", diff --git a/channels/seriehd.json b/channels/seriehd.json index 54a7a5a5..eda29eb2 100644 --- a/channels/seriehd.json +++ b/channels/seriehd.json @@ -2,7 +2,6 @@ "id": "seriehd", "name": "SerieHD", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "seriehd.png", "banner": "seriehd.png", diff --git a/channels/serietvonline.json b/channels/serietvonline.json index 700fe885..bc583756 100644 --- a/channels/serietvonline.json +++ b/channels/serietvonline.json @@ -2,7 +2,6 @@ "id": "serietvonline", "name": "SerieTvOnline", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "serietvonline.png", "bannermenu": "serietvonline.png", diff --git a/channels/serietvonline.py b/channels/serietvonline.py index 586f5c31..f80927bf 100644 --- a/channels/serietvonline.py +++ b/channels/serietvonline.py @@ -60,7 +60,6 @@ def mainlist(item): @support.scrape def peliculas(item): support.log() - #findhost() blacklist = ['DMCA', 'Contatti', 'Attenzione NON FARTI OSCURARE', 'Lista Cartoni Animati e Anime'] patronBlock = r'<h1>.+?</h1>(?P<block>.*?)<div class="footer_c">' @@ -91,7 +90,6 @@ def peliculas(item): patron = r'href="(?P<url>[^"]+)"[^>]+>(?P<title>.*?)[ ]?(?P<year>\d+)?(?: Streaming | MD iSTANCE )?<' patronBlock = r'Lista dei film disponibili in streaming e anche in download\.</p>(?P<block>.*?)<div class="footer_c">' else: - #patronBlock = r'<h1>Ultimi film aggiunti</h1>(?P<block>.*?)<div class="footer_c">' patron = r'<tr><td><a href="(?P<url>[^"]+)"(?:|.+?)?>(?:  )?[ ]?(?P<title>.*?)[ ]?(?P<quality>HD)?[ ]?(?P<year>\d+)?(?: | HD | Streaming | MD(?: iSTANCE)? 
)?</a>' def itemHook(item): @@ -104,22 +102,22 @@ def peliculas(item): item.contentType = 'tvshow' item.action = 'episodios' return item - - #support.regexDbg(item, patronBlock, headers) - #debug = True return locals() @support.scrape def episodios(item): support.log() - #findhost() action = 'findvideos' patronBlock = r'<table>(?P<block>.*?)<\/table>' patron = r'<tr><td>(?:[^<]+)[ ](?:Parte)?(?P<episode>\d+x\d+|\d+)(?:|[ ]?(?P<title2>.+?)?(?:avi)?)<(?P<url>.*?)</td><tr>' - - #debug = True + def itemlistHook(itemlist): + for i, item in enumerate(itemlist): + ep = support.match(item.title, patron=r'\d+x(\d+)').match + if ep == '00': + item.title = item.title.replace('x00', 'x' + str(i+1).zfill(2)).replace('- ..','') + return itemlist return locals() diff --git a/channels/serietvsubita.json b/channels/serietvsubita.json index dcd8baec..0db82e02 100644 --- a/channels/serietvsubita.json +++ b/channels/serietvsubita.json @@ -2,7 +2,6 @@ "id": "serietvsubita", "name": "Serie TV Sub ITA", "active": true, - "adult": false, "language": ["ita"], "thumbnail": "serietvsubita.png", "banner": "serietvsubita.png", diff --git a/channels/serietvu.json b/channels/serietvu.json index 929dcc5b..e9e1ec1e 100644 --- a/channels/serietvu.json +++ b/channels/serietvu.json @@ -2,7 +2,6 @@ "id": "serietvu", "name": "SerieTVU", "active": true, - "adult": false, "language": ["ita", "sub-ita"], "thumbnail": "serietvu.png", "banner": "serietvu.png", diff --git a/channels/streamingaltadefinizione.json b/channels/streamingaltadefinizione.json index c028f749..9666b270 100644 --- a/channels/streamingaltadefinizione.json +++ b/channels/streamingaltadefinizione.json @@ -3,7 +3,6 @@ "name": "Popcorn Stream", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "popcornstream.png", "banner": "popcornstream.png", "categories": ["movie","tvshow","anime"], diff --git a/channels/streamingaltadefinizione.py b/channels/streamingaltadefinizione.py index 959b4e11..64507fd9 100644 --- 
a/channels/streamingaltadefinizione.py +++ b/channels/streamingaltadefinizione.py @@ -12,14 +12,8 @@ list_servers = ['verystream', 'openload', 'wstream'] list_quality = ['1080p', 'HD', 'DVDRIP', 'SD', 'CAM'] def findhost(): - permUrl = httptools.downloadpage('https://www.popcornstream.info', follow_redirects=False, only_headers=True).headers - if 'google' in permUrl['location']: - host = permUrl['location'].replace('https://www.google.it/search?q=site:', '') - if host[:4] != 'http': - host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '') - else: - host = permUrl['location'] - return host + data = httptools.downloadpage('https://www.popcornstream-nuovo-indirizzo.online/').data + return support.scrapertools.find_single_match(data, '<a href="([^"]+)') host = config.get_channel_url(findhost) headers = [['Referer', host]] diff --git a/channels/streamtime.json b/channels/streamtime.json index d5e51934..11b43bab 100644 --- a/channels/streamtime.json +++ b/channels/streamtime.json @@ -3,7 +3,6 @@ "name": "StreamTime", "language": ["ita"], "active": false, - "adult": false, "thumbnail": "streamtime.png", "banner": "streamtime.png", "categories": ["tvshow", "movie"], diff --git a/channels/tantifilm.json b/channels/tantifilm.json index e50875c3..e9e12f8d 100644 --- a/channels/tantifilm.json +++ b/channels/tantifilm.json @@ -3,7 +3,6 @@ "name": "Tantifilm", "language": ["ita"], "active": true, - "adult": false, "thumbnail": "tantifilm.png", "banner": "tantifilm.png", "categories": ["tvshow", "movie", "anime"], diff --git a/channels/toonitalia.json b/channels/toonitalia.json index 15f0a135..fad45e0f 100644 --- a/channels/toonitalia.json +++ b/channels/toonitalia.json @@ -2,8 +2,7 @@ "id": "toonitalia", "name": "ToonItalia", "language": ["ita", "sub-ita"], - "active": true, - "adult": false, + "active": true, "thumbnail": "toonitalia.png", "banner": "toonitalia.png", "categories": ["tvshow", "movie", "vos", "anime"], diff --git 
a/channels/tunein.json b/channels/tunein.json index e72f724a..cb041bb3 100644 --- a/channels/tunein.json +++ b/channels/tunein.json @@ -2,7 +2,6 @@ "id": "tunein", "name": "TuneIn", "active": true, - "adult": false, "language": ["*"], "thumbnail": "tunein.png", "banner": "tunein.png", diff --git a/channels/tunein.py b/channels/tunein.py index 212965bc..d0d33383 100644 --- a/channels/tunein.py +++ b/channels/tunein.py @@ -21,6 +21,7 @@ def mainlist(item): patron = r'text="(?P<title>[^"]+)" URL="(?P<url>[^"]+)"' def itemHook(item): item.thumbnail = support.thumb(thumb='music.png') + item.contentType = 'music' return item def itemlistHook(itemlist): itemlist.append( diff --git a/channels/vedohd.json b/channels/vedohd.json index 4d87e941..51a47d4d 100644 --- a/channels/vedohd.json +++ b/channels/vedohd.json @@ -3,7 +3,6 @@ "name": "VedoHD", "language": ["ita"], "active": false, - "adult": false, "thumbnail": "vedohd.png", "banner": "vedohd.png", "categories": ["movie"], diff --git a/channels/vvvvid.json b/channels/vvvvid.json index e1f33816..4f84b77a 100644 --- a/channels/vvvvid.json +++ b/channels/vvvvid.json @@ -2,7 +2,6 @@ "id": "vvvvid", "name": "VVVVID", "active": true, - "adult": false, "language": ["ita","vos"], "thumbnail": "vvvvid.png", "banner": "vvvvid.png", diff --git a/channelselector.py b/channelselector.py index bfc1a764..10452461 100644 --- a/channelselector.py +++ b/channelselector.py @@ -1,15 +1,12 @@ # -*- coding: utf-8 -*- -import glob -import os -import sys +import glob, os from core import channeltools from core.item import Item from platformcode.unify import thumb_dict from platformcode import config, logger, unify -import xbmcaddon -addon = xbmcaddon.Addon('plugin.video.kod') +addon = config.__settings__ downloadenabled = addon.getSetting('downloadenabled') def getmainlist(view="thumb_"): @@ -20,16 +17,12 @@ def getmainlist(view="thumb_"): itemlist.append(Item(title="Redirect", channel="checkhost", action="check_channels", thumbnail='', 
category=config.get_localized_string(30119), viewmode="thumbnails")) - # Añade los canales que forman el menú principal + # Main Menu Channels if addon.getSetting('enable_news_menu') == "true": - # itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist", - # thumbnail=get_thumb("news.png", view), - # category=config.get_localized_string(30119), viewmode="thumbnails", - # context=CONTEXT + [{"title": config.get_localized_string(70285), "channel": "news", "action": "menu_opciones","goto": True}])) itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist", thumbnail=get_thumb("news.png", view), category=config.get_localized_string(30119), viewmode="thumbnails", - context=[{"title": config.get_localized_string(70285), "channel": "shortcuts", "action": "SettingOnPosition", "category":5}])) + context=[{"title": config.get_localized_string(70285), "channel": "shortcuts", "action": "SettingOnPosition", "category":7, "setting":1}])) if addon.getSetting('enable_channels_menu') == "true": itemlist.append(Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes", @@ -37,16 +30,11 @@ def getmainlist(view="thumb_"): category=config.get_localized_string(30119), viewmode="thumbnails")) if addon.getSetting('enable_search_menu') == "true": - # itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist", - # thumbnail=get_thumb("search.png", view), - # category=config.get_localized_string(30119), viewmode="list", - # context = CONTEXT + [{"title": config.get_localized_string(60412), "action": "setting_channel_new", "channel": "search"}, - # {"title": config.get_localized_string(70286), "action": "settings", "channel": "search"}])) itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist", thumbnail=get_thumb("search.png", view), 
category=config.get_localized_string(30119), viewmode="list", context = [{"title": config.get_localized_string(60412), "action": "setting_channel_new", "channel": "search"}, - {"title": config.get_localized_string(70286), "channel": "shortcuts", "action": "SettingOnPosition", "category":3}])) + {"title": config.get_localized_string(70286), "channel": "shortcuts", "action": "SettingOnPosition", "category":5 , "setting":1}])) if addon.getSetting('enable_onair_menu') == "true": itemlist.append(Item(channel="filmontv", action="mainlist", title=config.get_localized_string(50001), @@ -63,30 +51,18 @@ def getmainlist(view="thumb_"): category=config.get_localized_string(30102), viewmode="thumbnails")) if config.get_videolibrary_support() and addon.getSetting('enable_library_menu') == "true": - # itemlist.append(Item(title=config.get_localized_string(30131), channel="videolibrary", action="mainlist", - # thumbnail=get_thumb("videolibrary.png", view), - # category=config.get_localized_string(30119), viewmode="thumbnails", - # context=CONTEXT + [{"title": config.get_localized_string(70287), "channel": "videolibrary", - # "action": "channel_config"}])) itemlist.append(Item(title=config.get_localized_string(30131), channel="videolibrary", action="mainlist", thumbnail=get_thumb("videolibrary.png", view), category=config.get_localized_string(30119), viewmode="thumbnails", - context=[{"title": config.get_localized_string(70287), "channel": "shortcuts", "action": "SettingOnPosition", "category":2}, + context=[{"title": config.get_localized_string(70287), "channel": "shortcuts", "action": "SettingOnPosition", "category":2, "setting":1}, {"title": config.get_localized_string(60568), "channel": "videolibrary", "action": "update_videolibrary"}])) if downloadenabled != "false": - # itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist", - # thumbnail=get_thumb("downloads.png", view), viewmode="list", - # context=CONTEXT + [{"title": 
config.get_localized_string(70288), "channel": "setting", "config": "downloads", - # "action": "channel_config"}])) itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist", thumbnail=get_thumb("downloads.png", view), viewmode="list", - context=[{"title": config.get_localized_string(70288), "channel": "shortcuts", "action": "SettingOnPosition", "category":4}])) + context=[{"title": config.get_localized_string(70288), "channel": "shortcuts", "action": "SettingOnPosition", "category":6}])) thumb_setting = "setting_%s.png" % 0 # config.get_setting("plugin_updates_available") - # itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="mainlist", - # thumbnail=get_thumb(thumb_setting, view), - # category=config.get_localized_string(30100), viewmode="list")) itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="settings", thumbnail=get_thumb(thumb_setting, view), category=config.get_localized_string(30100), viewmode="list")) @@ -99,18 +75,14 @@ def getmainlist(view="thumb_"): def getchanneltypes(view="thumb_"): logger.info() - # Lista de categorias - # channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "direct", "live", "torrent", "music"] - channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "direct", "live", "music"] + # Category List + channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "direct", "live", "torrent", "music"] - if config.get_setting("adult_mode") != 0: - channel_types.append("adult") - - # channel_language = config.get_setting("channel_language", default="all") + # Channel Language channel_language = auto_filter() logger.info("channel_language=%s" % channel_language) - # Ahora construye el itemlist ordenadamente + # Build Itemlist itemlist = list() title = config.get_localized_string(30121) itemlist.append(Item(title=title, channel="channelselector", action="filterchannels", view=view, @@ 
-123,12 +95,6 @@ def getchanneltypes(view="thumb_"): channel_type=channel_type, viewmode="thumbnails", thumbnail=get_thumb("%s.png" % channel_type, view))) - # itemlist.append(Item(title='Oggi in TV', channel="filmontv", action="mainlist", view=view, - # category=title, channel_type="all", thumbnail=get_thumb("on_the_air.png", view), - # viewmode="thumbnails")) - - - itemlist.append(Item(title=config.get_localized_string(70685), channel="community", action="mainlist", view=view, category=config.get_localized_string(70685), channel_type="all", thumbnail=get_thumb("community.png", view), viewmode="thumbnails")) @@ -136,27 +102,23 @@ def getchanneltypes(view="thumb_"): def filterchannels(category, view="thumb_"): - logger.info('Filterchannl'+category) + logger.info('Filter Channels ' + category) channelslist = [] - # Si category = "allchannelstatus" es que estamos activando/desactivando canales + # If category = "allchannelstatus" is that we are activating / deactivating channels appenddisabledchannels = False if category == "allchannelstatus": category = "all" appenddisabledchannels = True - # Lee la lista de canales - if category != 'adult': - channel_path = os.path.join(config.get_runtime_path(), 'channels', '*.json') - else: - channel_path = os.path.join(config.get_runtime_path(), 'channels', 'porn', '*.json') + channel_path = os.path.join(config.get_runtime_path(), 'channels', '*.json') logger.info("channel_path = %s" % channel_path) channel_files = glob.glob(channel_path) - logger.info("channel_files encontrados %s" % (len(channel_files))) + logger.info("channel_files found %s" % (len(channel_files))) - # channel_language = config.get_setting("channel_language", default="all") + # Channel Language channel_language = auto_filter() logger.info("channel_language=%s" % channel_language) @@ -171,106 +133,87 @@ def filterchannels(category, view="thumb_"): if channel_parameters["channel"] == 'community': continue - # # si el canal no es compatible, no se muestra - # if 
not channel_parameters["compatible"]: - # continue - - # Si no es un canal lo saltamos + # If it's not a channel we skip it if not channel_parameters["channel"]: continue logger.info("channel_parameters=%s" % repr(channel_parameters)) - # Si prefiere el banner y el canal lo tiene, cambia ahora de idea + # If you prefer the banner and the channel has it, now change your mind if view == "banner_" and "banner" in channel_parameters: channel_parameters["thumbnail"] = channel_parameters["banner"] - # si el canal está desactivado no se muestra el canal en la lista + # if the channel is deactivated the channel is not shown in the list if not channel_parameters["active"]: continue - # Se salta el canal si no está activo y no estamos activando/desactivando los canales + # The channel is skipped if it is not active and we are not activating / deactivating the channels channel_status = config.get_setting("enabled", channel_parameters["channel"]) if channel_status is None: - # si channel_status no existe es que NO HAY valor en _data.json. - # como hemos llegado hasta aquí (el canal está activo en channel.json), se devuelve True + # if channel_status does not exist, there is NO value in _data.json. 
+ # as we got here (the channel is active in channel.json), True is returned channel_status = True if not channel_status: - # si obtenemos el listado de canales desde "activar/desactivar canales", y el canal está desactivado - # lo mostramos, si estamos listando todos los canales desde el listado general y está desactivado, - # no se muestra + # if we get the list of channels from "activate / deactivate channels", and the channel is deactivated + # we show it, if we are listing all the channels from the general list and it is deactivated, it is not shown if not appenddisabledchannels: continue - # Se salta el canal para adultos si el modo adultos está desactivado - if channel_parameters["adult"] and config.get_setting("adult_mode") == 0: - continue - - # Se salta el canal si está en un idioma filtrado - # Se muestran todos los canales si se elige "all" en el filtrado de idioma - # Se muestran sólo los idiomas filtrados, cast o lat - # Los canales de adultos se mostrarán siempre que estén activos - - # for channel_language_list in channel_language_list: - # if c in channel_parameters["language"]: - # L = True - # else: - # L = False - # logger.info('CCLANG= ' + channel_language + ' ' + str(channel_language_list)) if channel_language != "all" and "*" not in channel_parameters["language"] \ and channel_language not in str(channel_parameters["language"]): continue - # Se salta el canal si está en una categoria filtrado + # The channel is skipped if it is in a filtered category if category != "all" and category not in channel_parameters["categories"]: continue - # Si tiene configuración añadimos un item en el contexto + # If you have configuration we add an item in the context context = [] if channel_parameters["has_settings"]: context.append({"title": config.get_localized_string(70525), "channel": "setting", "action": "channel_config", "config": channel_parameters["channel"]}) channel_info = set_channel_info(channel_parameters) - # Si ha llegado hasta aquí, lo añade + 
# If it has come this far, add it channelslist.append(Item(title=channel_parameters["title"], channel=channel_parameters["channel"], action="mainlist", thumbnail=channel_parameters["thumbnail"], fanart=channel_parameters["fanart"], plot=channel_info, category=channel_parameters["title"], language=channel_parameters["language"], viewmode="list", context=context)) except: - logger.error("Se ha producido un error al leer los datos del canal '%s'" % channel) + logger.error("An error occurred while reading the channel data '%s'" % channel) import traceback logger.error(traceback.format_exc()) channelslist.sort(key=lambda item: item.title.lower().strip()) - if category == "all": - channel_parameters = channeltools.get_channel_parameters('url') - # Si prefiere el banner y el canal lo tiene, cambia ahora de idea - if view == "banner_" and "banner" in channel_parameters: - channel_parameters["thumbnail"] = channel_parameters["banner"] + if not config.get_setting("only_channel_icons"): + if category == "all": + channel_parameters = channeltools.get_channel_parameters('url') + # If you prefer the banner and the channel has it, now change your mind + if view == "banner_" and "banner" in channel_parameters: + channel_parameters["thumbnail"] = channel_parameters["banner"] - channelslist.insert(0, Item(title=config.get_localized_string(60088), action="mainlist", channel="url", - thumbnail=channel_parameters["thumbnail"], type="generic", viewmode="list")) + channelslist.insert(0, Item(title=config.get_localized_string(60088), action="mainlist", channel="url", + thumbnail=channel_parameters["thumbnail"], type="generic", viewmode="list")) + # Special Category + if category in ['movie', 'tvshow']: + titles = [config.get_localized_string(70028), config.get_localized_string(30985), config.get_localized_string(70559), config.get_localized_string(60264), config.get_localized_string(70560)] + ids = ['popular', 'top_rated', 'now_playing', 'on_the_air'] + for x in range(0,3): + if x == 2 
and category != 'movie': + title=titles[x+1] + id = ids[x+1] + else: + title=titles[x] + id = ids[x] + channelslist.insert(x, + Item(channel='search', action='discover_list', title=title, search_type='list', + list_type='%s/%s' % (category.replace('show',''), id), mode=category, thumbnail=get_thumb(id+".png"))) - if category in ['movie', 'tvshow']: - titles = [config.get_localized_string(70028), config.get_localized_string(30985), config.get_localized_string(70559), config.get_localized_string(60264), config.get_localized_string(70560)] - ids = ['popular', 'top_rated', 'now_playing', 'on_the_air'] - for x in range(0,3): - if x == 2 and category != 'movie': - title=titles[x+1] - id = ids[x+1] - else: - title=titles[x] - id = ids[x] - channelslist.insert(x, - Item(channel='search', action='discover_list', title=title, search_type='list', - list_type='%s/%s' % (category.replace('show',''), id), mode=category, thumbnail=get_thumb(id+".png"))) - - channelslist.insert(3, Item(channel='search', action='genres_menu', title=config.get_localized_string(30987), - type=category.replace('show',''), mode=category ,thumbnail=get_thumb("genres.png"))) + channelslist.insert(3, Item(channel='search', action='genres_menu', title=config.get_localized_string(30987), + type=category.replace('show',''), mode=category ,thumbnail=get_thumb("genres.png"))) return channelslist @@ -314,15 +257,11 @@ def set_channel_info(parameters): '*':'Italiano, Sottotitolato in Italiano'} for lang in langs: - # if 'vos' in parameters['categories']: - # lang = '*' - # if 'sub-ita' in parameters['categories']: - # lang = 'ita' if lang in lang_dict: - if language != '' and language != '*' and not parameters['adult']: + if language != '' and language != '*': language = '%s, %s' % (language, lang_dict[lang]) - elif not parameters['adult']: + else: language = lang_dict[lang] if lang == '*': break @@ -352,26 +291,6 @@ def auto_filter(auto_lang=False): return lang - # import xbmc, xbmcaddon - - # addon = 
xbmcaddon.Addon('metadata.themoviedb.org') - # def_lang = addon.getSetting('language') - # lang = 'all' - # lang_list = ['all'] - - # lang_dict = {'it':'ita'} - # lang_list_dict = {'it':['ita','vosi']} - - # if config.get_setting("channel_language") == 'auto' or auto_lang == True: - # lang = lang_dict[def_lang] - # lang_list = lang_list_dict[def_lang] - - # else: - # lang = config.get_setting("channel_language", default="all") - # lang_list = lang_list_dict[def_lang] - - # return lang, lang_list - def thumb(item_or_itemlist=None, genre=False, thumb=''): import re diff --git a/core/channeltools.py b/core/channeltools.py index 62bbdde9..1ab338ef 100644 --- a/core/channeltools.py +++ b/core/channeltools.py @@ -14,13 +14,6 @@ default_file = dict() remote_path = 'https://raw.githubusercontent.com/kodiondemand/media/master/' - -def is_adult(channel_name): - logger.info("channel_name=" + channel_name) - channel_parameters = get_channel_parameters(channel_name) - return channel_parameters["adult"] - - def is_enabled(channel_name): logger.info("channel_name=" + channel_name) return get_channel_parameters(channel_name)["active"] and get_channel_setting("enabled", channel=channel_name, @@ -41,14 +34,8 @@ def get_channel_parameters(channel_name): channel_parameters["channel"] = channel_parameters.pop("id") # si no existe el key se declaran valor por defecto para que no de fallos en las funciones que lo llaman - channel_parameters["adult"] = channel_parameters.get("adult", False) - logger.info(channel_parameters["adult"]) - if channel_parameters["adult"]: - channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL + 'porn/') - else: - channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL) + channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL) channel_parameters["language"] = channel_parameters.get("language", ["all"]) - ## channel_parameters["adult"] = 
channel_parameters.get("adult", False) channel_parameters["active"] = channel_parameters.get("active", False) channel_parameters["include_in_global_search"] = channel_parameters.get("include_in_global_search", False) @@ -92,7 +79,6 @@ def get_channel_parameters(channel_name): logger.error(channel_name + ".json error \n%s" % ex) channel_parameters = dict() channel_parameters["channel"] = "" - channel_parameters["adult"] = False channel_parameters['active'] = False channel_parameters["language"] = "" channel_parameters["update_url"] = DEFAULT_UPDATE_URL @@ -108,14 +94,12 @@ def get_channel_json(channel_name): try: channel_path = filetools.join(config.get_runtime_path(), "channels", channel_name + ".json") if not filetools.isfile(channel_path): - channel_path = filetools.join(config.get_runtime_path(), 'channels', "porn", channel_name + ".json") + channel_path = filetools.join(config.get_runtime_path(), "specials", channel_name + ".json") if not filetools.isfile(channel_path): - channel_path = filetools.join(config.get_runtime_path(), "specials", channel_name + ".json") + channel_path = filetools.join(config.get_runtime_path(), "servers", channel_name + ".json") if not filetools.isfile(channel_path): - channel_path = filetools.join(config.get_runtime_path(), "servers", channel_name + ".json") - if not filetools.isfile(channel_path): - channel_path = filetools.join(config.get_runtime_path(), "servers", "debriders", - channel_name + ".json") + channel_path = filetools.join(config.get_runtime_path(), "servers", "debriders", + channel_name + ".json") if filetools.isfile(channel_path): # logger.info("channel_data=" + channel_path) @@ -182,10 +166,9 @@ def get_default_settings(channel_name): default_file = jsontools.load(filetools.read(default_path)) channel_path = filetools.join(config.get_runtime_path(), 'channels', channel_name + '.json') - adult_path = filetools.join(config.get_runtime_path(), 'channels', 'porn', channel_name + '.json') # from core.support import dbg; 
dbg() - if filetools.exists(channel_path) or filetools.exists(adult_path): + if filetools.exists(channel_path): default_controls = default_file['settings'] default_controls_renumber = default_file['renumber'] channel_json = get_channel_json(channel_name) diff --git a/core/downloadtools.py b/core/downloadtools.py index 628b9bcb..57b020aa 100644 --- a/core/downloadtools.py +++ b/core/downloadtools.py @@ -621,7 +621,7 @@ def downloadfileGzipped(url, pathfichero): else: tiempofalta = 0 logger.info(sec_to_hms(tiempofalta)) - progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s falta " % + progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s mancanti " % (descargadosmb, totalmb, percent, old_div(velocidad, 1024), sec_to_hms(tiempofalta))) break except: diff --git a/core/httptools.py b/core/httptools.py index 6f1bde5c..9cf736ed 100755 --- a/core/httptools.py +++ b/core/httptools.py @@ -41,12 +41,23 @@ if HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT == 0: HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = # Random use of User-Agents, if nad is not specified HTTPTOOLS_DEFAULT_RANDOM_HEADERS = False -domainCF = list() -channelsCF = ['guardaserieclick', 'casacinema', 'dreamsub', 'ilgeniodellostreaming', 'piratestreaming', 'altadefinizioneclick', 'altadefinizione01_link', 'cineblog01'] -otherCF = ['altadefinizione-nuovo.link', 'wstream.video', 'akvideo.stream', 'backin.net', 'vcrypt.net'] -for ch in channelsCF: - domainCF.append(urlparse.urlparse(config.get_channel_url(name=ch)).hostname) -domainCF.extend(otherCF) +# old +# domainCF = list() +# channelsCF = ['guardaserieclick', 'ilgeniodellostreaming'] +# otherCF = ['akvideo.stream', 'backin.net', 'vcrypt.net'] +# for ch in channelsCF: +# domainCF.append(urlparse.urlparse(config.get_channel_url(name=ch)).hostname) +# domainCF.extend(otherCF) + +global CF_LIST +CF_LIST = list() +CF_LIST_PATH = os.path.join(config.get_data_path(), "CF_Domains.txt") + +if os.path.exists(CF_LIST_PATH): + with open(CF_LIST_PATH, "rb") as CF_File: + CF_LIST = 
CF_File.read().splitlines() + +FORCE_CLOUDSCRAPER_LIST = ['akvideo.stream'] def get_user_agent(): # Returns the global user agent to be used when necessary for the url. @@ -212,7 +223,7 @@ def show_infobox(info_dict): def downloadpage(url, **opt): - logger.info() + # logger.info() """ Open a url and return the data obtained @@ -257,9 +268,10 @@ def downloadpage(url, **opt): """ url = scrapertools.unescape(url) domain = urlparse.urlparse(url).netloc - global domainCF + global CF_LIST CF = False - if domain in domainCF or opt.get('cf', False): + + if domain in FORCE_CLOUDSCRAPER_LIST: from lib import cloudscraper session = cloudscraper.create_scraper() CF = True @@ -267,6 +279,10 @@ def downloadpage(url, **opt): from lib import requests session = requests.session() + if domain in CF_LIST or opt.get('CF', False): + url = 'https://web.archive.org/save/' + url + CF = True + if config.get_setting('resolver_dns') and not opt.get('use_requests', False): from specials import resolverdns session.mount('https://', resolverdns.CipherSuiteAdapter(domain, CF)) @@ -383,7 +399,18 @@ def downloadpage(url, **opt): response_code = req.status_code + if req.headers.get('Server', '').startswith('cloudflare') and response_code in [429, 503, 403] and not opt.get('CF', False): + if domain not in CF_LIST: + opt["CF"] = True + with open(CF_LIST_PATH, "a") as CF_File: + CF_File.write("%s\n" % domain) + logger.debug("CF retry... 
for domain: %s" % domain) + return downloadpage(url, **opt) + response['data'] = req.content if req.content else '' + if CF: + import re + response['data'] = re.sub('["|\']/save/[^"]*(https?://[^"]+)', '"\\1', response['data']) response['url'] = req.url if type(response['data']) != str: diff --git a/core/item.py b/core/item.py index 5ab216ea..f3d33b9a 100644 --- a/core/item.py +++ b/core/item.py @@ -41,7 +41,7 @@ class InfoLabels(dict): # super(InfoLabels, self).__setitem__('code', value) super(InfoLabels, self).__setitem__('imdb_id', str(value)) - elif name == "mediatype" and value not in ["list", "movie", "tvshow", "season", "episode"]: + elif name == "mediatype" and value not in ["list", "movie", "tvshow", "season", "episode", "music"]: super(InfoLabels, self).__setitem__('mediatype', 'list') elif name in ['tmdb_id', 'tvdb_id', 'noscrap_id']: diff --git a/core/jsontools.py b/core/jsontools.py index 07733fd8..d12a5213 100644 --- a/core/jsontools.py +++ b/core/jsontools.py @@ -6,6 +6,7 @@ import traceback from platformcode import logger +from inspect import stack try: import json @@ -43,6 +44,7 @@ def load(*args, **kwargs): except: logger.error("**NOT** able to load the JSON") logger.error(traceback.format_exc()) + logger.error('ERROR STACK ' + str(stack()[1][3])) value = {} return value @@ -146,7 +148,7 @@ def check_to_backup(data, fname, dict_data): logger.debug("The file is empty: %s" % fname) -def update_node(dict_node, name_file, node, path=None): +def update_node(dict_node, name_file, node, path=None, silent=False): """ actualiza el json_data de un fichero con el diccionario pasado @@ -162,7 +164,7 @@ def update_node(dict_node, name_file, node, path=None): @return json_data @rtype: dict """ - logger.info() + if not silent: logger.info() from platformcode import config from core import filetools @@ -183,14 +185,14 @@ def update_node(dict_node, name_file, node, path=None): # es un dict if dict_data: if node in dict_data: - logger.debug(" the key exists %s" % 
node) + if not silent: logger.debug(" the key exists %s" % node) dict_data[node] = dict_node else: - logger.debug(" The key does NOT exist %s" % node) + if not silent: logger.debug(" The key does NOT exist %s" % node) new_dict = {node: dict_node} dict_data.update(new_dict) else: - logger.debug(" It is NOT a dict") + if not silent: logger.debug(" It is NOT a dict") dict_data = {node: dict_node} json_data = dump(dict_data) result = filetools.write(fname, json_data) diff --git a/core/support.py b/core/support.py index 22dd0841..589e1b0b 100755 --- a/core/support.py +++ b/core/support.py @@ -6,6 +6,9 @@ import inspect import os import re import sys + +from lib.guessit import guessit + PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: @@ -137,17 +140,6 @@ def regexDbg(item, patron, headers, data=''): webbrowser.open(url + "/r/" + permaLink) -def scrape2(item, patron = '', listGroups = [], headers="", blacklist="", data="", patronBlock="", - patronNext="", action="findvideos", addVideolibrary = True, typeContentDict={}, typeActionDict={}): - m = re.search(r'(?<!\\|\[)\((?!\?)', patron) - n = 0 - while m: - patron = patron[:m.end()] + '?P<' + listGroups[n] + '>' + patron[m.end():] - m = re.search(r'(?<!\\|\[)\((?!\?)', patron) - n += 1 - regexDbg(item, patron, headers) - - def scrapeLang(scraped, lang, longtitle): ## Aggiunto/modificato per gestire i siti che hanno i video ## in ita e subita delle serie tv nella stessa pagina @@ -171,7 +163,7 @@ def cleantitle(title): cleantitle = title.replace('"', "'").replace('×', 'x').replace('–', '-').strip() return cleantitle -def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang): +def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang, sceneTitle): itemlist = [] log("scrapeBlock qui") if 
debug: @@ -240,17 +232,6 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t Type = scraped['type'] if scraped['type'] else '' plot = cleantitle(scraped["plot"]) if scraped["plot"] else '' - # make formatted Title [longtitle] - s = ' - ' - title = episode + (s if episode and title else '') + title - longtitle = title + (s if title and title2 else '') + title2 - longtitle = typo(longtitle, 'bold') - longtitle += typo(quality, '_ [] color kod') if quality else '' - longtitle += typo(scraped['size'], '_ [] color kod') if scraped['size'] else '' - longtitle += typo(scraped['seed']+ ' SEEDS', '_ [] color kod') if scraped['seed'] else '' - - lang1, longtitle = scrapeLang(scraped, lang, longtitle) - # if title is set, probably this is a list of episodes or video sources # necessaria l'aggiunta di == scraped["title"] altrimenti non prende i gruppi dopo le categorie if item.infoLabels["title"] == scraped["title"]: @@ -275,6 +256,49 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t if scraped["rating"]: infolabels['rating'] = scrapertools.decodeHtmlentities(scraped["rating"]) + # make formatted Title [longtitle] + s = ' - ' + title = episode + (s if episode and title else '') + title + longtitle = title + (s if title and title2 else '') + title2 + '\n' + + if sceneTitle: + try: + parsedTitle = guessit(title) + title = longtitle = parsedTitle.get('title', '') + log('TITOLO',title) + if parsedTitle.get('source'): + quality = str(parsedTitle.get('source')) + if parsedTitle.get('screen_size'): + quality += ' ' + str(parsedTitle.get('screen_size', '')) + if not scraped['year']: + infolabels['year'] = parsedTitle.get('year', '') + if parsedTitle.get('episode') and parsedTitle.get('season'): + longtitle = title + s + + if type(parsedTitle.get('season')) == list: + longtitle += str(parsedTitle.get('season')[0]) + '-' + str(parsedTitle.get('season')[-1]) + else: + longtitle += str(parsedTitle.get('season')) + + if 
type(parsedTitle.get('episode')) == list: + longtitle += 'x' + str(parsedTitle.get('episode')[0]).zfill(2) + '-' + str(parsedTitle.get('episode')[-1]).zfill(2) + else: + longtitle += 'x' + str(parsedTitle.get('episode')).zfill(2) + elif parsedTitle.get('season') and type(parsedTitle.get('season')) == list: + longtitle += s + config.get_localized_string(30140) + " " +str(parsedTitle.get('season')[0]) + '-' + str(parsedTitle.get('season')[-1]) + elif parsedTitle.get('season'): + longtitle += s + config.get_localized_string(60027) % str(parsedTitle.get('season')) + if parsedTitle.get('episode_title'): + longtitle += s + parsedTitle.get('episode_title') + except: + log('Error') + + longtitle = typo(longtitle, 'bold') + lang1, longtitle = scrapeLang(scraped, lang, longtitle) + longtitle += typo(quality, '_ [] color kod') if quality else '' + longtitle += typo(scraped['size'], '_ [] color kod') if scraped['size'] else '' + longtitle += typo(scraped['seed'] + ' SEEDS', '_ [] color kod') if scraped['seed'] else '' + AC = CT = '' if typeContentDict: for name, variants in typeContentDict.items(): @@ -288,7 +312,6 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t AC = name break else: AC = action - if (scraped["title"] not in blacklist) and (search.lower() in longtitle.lower()): it = Item( channel=item.channel, @@ -380,6 +403,7 @@ def scrape(func): if 'pagination' in args and inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']: pagination = args['pagination'] if args['pagination'] else 20 else: pagination = '' lang = args['deflang'] if 'deflang' in args else '' + sceneTitle = args.get('sceneTitle') pag = item.page if item.page else 1 # pagination matches = [] @@ -402,7 +426,7 @@ def scrape(func): if 'season' in bl and bl['season']: item.season = bl['season'] blockItemlist, blockMatches = scrapeBlock(item, args, bl['block'], patron, headers, action, pagination, debug, - typeContentDict, typeActionDict, 
blacklist, search, pag, function, lang) + typeContentDict, typeActionDict, blacklist, search, pag, function, lang, sceneTitle) for it in blockItemlist: if 'lang' in bl: it.contentLanguage, it.title = scrapeLang(bl, it.contentLanguage, it.title) @@ -413,7 +437,7 @@ def scrape(func): matches.extend(blockMatches) elif patron: itemlist, matches = scrapeBlock(item, args, data, patron, headers, action, pagination, debug, typeContentDict, - typeActionDict, blacklist, search, pag, function, lang) + typeActionDict, blacklist, search, pag, function, lang, sceneTitle) if 'itemlistHook' in args: itemlist = args['itemlistHook'](itemlist) @@ -453,7 +477,7 @@ def scrape(func): page=pag + 1, thumbnail=thumb())) - if action != 'play' and function != 'episodios' and 'patronMenu' not in args: + if action != 'play' and function != 'episodios' and 'patronMenu' not in args and item.contentType in ['movie', 'tvshow', 'episode']: tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) from specials import autorenumber @@ -826,7 +850,6 @@ def match(item_url_string, **args): match: first match matches: all the matches ''' - log() matches = blocks = [] url = None @@ -839,7 +862,7 @@ def match(item_url_string, **args): string = args.get('string', False) # remove scrape arguments - args = dict([(key, val) for key, val in args.items() if key not in ['patron', 'patronBlock', 'patronBlocks', 'debug', 'debugBlock', 'string']]) + args = dict([(key, val) for key, val in args.items() if key not in ['patron', 'patronBlock', 'patronBlocks', 'debug', 'debugBlock', 'string']]) # check type of item_url_string if string: @@ -927,11 +950,13 @@ def download(itemlist, item, typography='', function_level=1, function=''): elif item.contentType == 'episode': from_action = 'findvideos' title = typo(config.get_localized_string(60356), typography) + ' - ' + item.title - else: + elif item.contentType == 'tvshow': from_action = 'episodios' title = typo(config.get_localized_string(60355), typography) + else: # content 
type does not support download + return itemlist - function = function if function else inspect.stack()[function_level][3] + # function = function if function else inspect.stack()[function_level][3] contentSerieName=item.contentSerieName if item.contentSerieName else '' contentTitle=item.contentTitle if item.contentTitle else '' @@ -1119,21 +1144,7 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True, vid from platformcode.config import get_setting CL = get_setting('checklinks') or get_setting('checklinks', item.channel) - autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') - channel_node = autoplay_node.get(item.channel, {}) - if not channel_node: # non ha mai aperto il menu del canale quindi in autoplay_data.json non c'e la key - try: - channelFile = __import__('channels.' + item.channel, fromlist=["channels.%s" % item.channel]) - except: - channelFile = __import__('specials.' + item.channel, fromlist=["specials.%s" % item.channel]) - if hasattr(channelFile, 'list_servers') and hasattr(channelFile, 'list_quality'): - autoplay.init(item.channel, channelFile.list_servers, channelFile.list_quality) - - autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') - channel_node = autoplay_node.get(item.channel, {}) - settings_node = channel_node.get('settings', {}) - AP = get_setting('autoplay') or (settings_node['active'] if 'active' in settings_node else False) - HS = config.get_setting('hide_servers') or (settings_node['hide_servers'] if 'hide_server' in settings_node else False) + AP, HS = autoplay.get_channel_AP_HS(item) if CL and not AP: if get_setting('checklinks', item.channel): diff --git a/core/tmdb.py b/core/tmdb.py index 0e3c137e..18b7464d 100644 --- a/core/tmdb.py +++ b/core/tmdb.py @@ -826,7 +826,7 @@ class Tmdb(object): self.busqueda_texto = re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', self.texto_buscado).strip() self.busqueda_tipo = kwargs.get('tipo', '') self.busqueda_idioma = 
kwargs.get('idioma_busqueda', def_lang) - self.busqueda_include_adult = kwargs.get('include_adult', False) + # self.busqueda_include_adult = kwargs.get('include_adult', False) self.busqueda_year = kwargs.get('year', '') self.busqueda_filtro = kwargs.get('filtro', {}) self.discover = kwargs.get('discover', {}) @@ -978,7 +978,7 @@ class Tmdb(object): # &include_adult=false&page=1 url = ('http://api.themoviedb.org/3/search/%s?api_key=a1ab8b8669da03637a4b98fa39c39228&query=%s&language=%s' '&include_adult=%s&page=%s' % (self.busqueda_tipo, text_quote, - self.busqueda_idioma, self.busqueda_include_adult, page)) + self.busqueda_idioma, True, page)) if self.busqueda_year: url += '&year=%s' % self.busqueda_year diff --git a/core/videolibrarytools.py b/core/videolibrarytools.py index 1b25cc07..006f631b 100644 --- a/core/videolibrarytools.py +++ b/core/videolibrarytools.py @@ -111,7 +111,7 @@ def save_movie(item, silent=False): # Si llegados a este punto no tenemos titulo, salimos if not item.contentTitle or not item.channel: logger.debug("contentTitle NOT FOUND") - return 0, 0, -1 # Salimos sin guardar + return 0, 0, -1, path # Salimos sin guardar scraper_return = scraper.find_and_set_infoLabels(item) @@ -123,7 +123,7 @@ def save_movie(item, silent=False): # TODO de momento si no hay resultado no añadimos nada, # aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano logger.debug("NOT FOUND IN SCRAPER OR DO NOT HAVE code") - return 0, 0, -1 + return 0, 0, -1, path _id = item.infoLabels['code'][0] @@ -158,7 +158,7 @@ def save_movie(item, silent=False): logger.info("Creating movie directory:" + path) if not filetools.mkdir(path): logger.debug("Could not create directory") - return 0, 0, -1 + return 0, 0, -1, path nfo_path = filetools.join(path, "%s [%s].nfo" % (base_name, _id)) strm_path = filetools.join(path, "%s.strm" % base_name) @@ -227,14 +227,14 @@ def save_movie(item, silent=False): xbmc_videolibrary.update() if not silent: p_dialog.close() - 
return insertados, sobreescritos, fallidos + return insertados, sobreescritos, fallidos, path # Si llegamos a este punto es por q algo ha fallado logger.error("Could not save %s in the video library" % item.contentTitle) if not silent: p_dialog.update(100, config.get_localized_string(60063), item.contentTitle) p_dialog.close() - return 0, 0, -1 + return 0, 0, -1, path def update_renumber_options(item, head_nfo, path): from core import jsontools @@ -586,6 +586,22 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): logger.info("There is no episode list, we go out without creating strm") return 0, 0, 0 + # process local episodes + local_episodes_path = '' + nfo_path = filetools.join(path, "tvshow.nfo") + head_nfo, item_nfo = read_nfo(nfo_path) + if item_nfo.update_last: + local_episodes_path = item_nfo.local_episodes_path + elif config.get_setting("local_episodes", "videolibrary"): + done, local_episodes_path = config_local_episodes_path(path, serie.show) + if done < 0: + logger.info("An issue has occurred while configuring local episodes, going out without creating strm") + return 0, 0, done + item_nfo.local_episodes_path = local_episodes_path + filetools.write(nfo_path, head_nfo + item_nfo.tojson()) + if local_episodes_path: + process_local_episodes(local_episodes_path, path) + insertados = 0 sobreescritos = 0 fallidos = 0 @@ -671,9 +687,6 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): except: t = 0 - local_episodelist = get_local_content(path) - - last_season_episode = '' for i, e in enumerate(scraper.sort_episode_list(new_episodelist)): if not silent: p_dialog.update(int(math.ceil((i + 1) * t)), config.get_localized_string(60064), e.title) @@ -695,9 +708,11 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): nfo_path = filetools.join(path, "%s.nfo" % season_episode) json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) - if season_episode in 
local_episodelist: - logger.info('Skipped: Serie ' + serie.contentSerieName + ' ' + season_episode + ' available as local content') + # check if the episode has been downloaded + if filetools.join(path, "%s [downloads].json" % season_episode) in ficheros: + logger.info('INFO: "%s" episode %s has been downloaded, skipping it' % (serie.contentSerieName, season_episode)) continue + strm_exists = strm_path in ficheros nfo_exists = nfo_path in ficheros json_exists = json_path in ficheros @@ -808,7 +823,8 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): tvshow_item.infoLabels["title"] = tvshow_item.infoLabels["tvshowtitle"] if max_sea == high_sea and max_epi == high_epi and (tvshow_item.infoLabels["status"] == "Ended" - or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0: + or tvshow_item.infoLabels["status"] == "Canceled") and insertados == 0 and fallidos == 0 \ + and not tvshow_item.local_episodes_path: tvshow_item.active = 0 # ... no la actualizaremos más logger.debug("%s [%s]: serie 'Terminada' o 'Cancelada'. 
Se desactiva la actualización periódica" % \ (serie.contentSerieName, serie.channel)) @@ -838,19 +854,72 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True): return insertados, sobreescritos, fallidos -def get_local_content(path): +def config_local_episodes_path(path, title, silent=False): logger.info() - local_episodelist = [] - for root, folders, files in filetools.walk(path): - for file in files: - season_episode = scrapertools.get_season_and_episode(file) - if season_episode == "" or filetools.exists(filetools.join(path, "%s.strm" % season_episode)): - continue - local_episodelist.append(season_episode) - local_episodelist = sorted(set(local_episodelist)) + local_episodes_path = '' + if not silent: + silent = platformtools.dialog_yesno(config.get_localized_string(30131), config.get_localized_string(80044) % title) + if silent: + if config.is_xbmc() and not config.get_setting("videolibrary_kodi"): + platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80043)) + local_episodes_path = platformtools.dialog_browse(0, config.get_localized_string(80046)) + if local_episodes_path == '': + logger.info("User has canceled the dialog") + return -2, local_episodes_path + elif path in local_episodes_path: + platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(80045)) + logger.info("Selected folder is the same of the TV show one") + return -2, local_episodes_path - return local_episodelist + if local_episodes_path: + # import artwork + artwork_extensions = ['.jpg', '.jpeg', '.png'] + files = filetools.listdir(local_episodes_path) + for file in files: + if os.path.splitext(file)[1] in artwork_extensions: + filetools.copy(filetools.join(local_episodes_path, file), filetools.join(path, file)) + + return 0, local_episodes_path + + +def process_local_episodes(local_episodes_path, path): + logger.info() + + sub_extensions = ['.srt', '.sub', '.sbv', '.ass', '.idx', '.ssa', '.smi'] + 
artwork_extensions = ['.jpg', '.jpeg', '.png'] + extensions = sub_extensions + artwork_extensions + + local_episodes_list = [] + files_list = [] + for root, folders, files in filetools.walk(local_episodes_path): + for file in files: + if os.path.splitext(file)[1] in extensions: + continue + season_episode = scrapertools.get_season_and_episode(file) + if season_episode == "": + continue + local_episodes_list.append(season_episode) + files_list.append(file) + + nfo_path = filetools.join(path, "tvshow.nfo") + head_nfo, item_nfo = read_nfo(nfo_path) + + # if a local episode has been added, overwrites the strm + for season_episode, file in zip(local_episodes_list, files_list): + if not season_episode in item_nfo.local_episodes_list: + filetools.write(filetools.join(path, season_episode + '.strm'), filetools.join(root, file)) + + # if a local episode has been removed, deletes the strm + for season_episode in set(item_nfo.local_episodes_list).difference(local_episodes_list): + filetools.remove(filetools.join(path, season_episode + '.strm')) + + # updates the local episodes path and list in the nfo + if not local_episodes_list: + item_nfo.local_episodes_path = '' + item_nfo.local_episodes_list = sorted(set(local_episodes_list)) + + filetools.write(nfo_path, head_nfo + item_nfo.tojson()) def add_movie(item): @@ -883,12 +952,13 @@ def add_movie(item): # del item.tmdb_stat #Limpiamos el status para que no se grabe en la Videoteca new_item = item.clone(action="findvideos") - insertados, sobreescritos, fallidos = save_movie(new_item) + insertados, sobreescritos, fallidos, path = save_movie(new_item) if fallidos == 0: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(30135) % new_item.contentTitle) # 'se ha añadido a la videoteca' else: + filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60066) % new_item.contentTitle) #"ERROR, la pelicula NO se ha añadido a la videoteca") @@ 
-968,13 +1038,18 @@ def add_tvshow(item, channel=None): insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist) if not insertados and not sobreescritos and not fallidos: + filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60067) % item.show) logger.error("La serie %s no se ha podido añadir a la videoteca. No se ha podido obtener ningun episodio" % item.show) elif fallidos == -1: + filetools.rmdirtree(path) platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60068) % item.show) logger.error("La serie %s no se ha podido añadir a la videoteca" % item.show) + elif fallidos == -2: + filetools.rmdirtree(path) + elif fallidos > 0: platformtools.dialog_ok(config.get_localized_string(30131), config.get_localized_string(60069) % item.show) logger.error("No se han podido añadir %s episodios de la serie %s a la videoteca" % (fallidos, item.show)) diff --git a/lib/babelfish/__init__.py b/lib/babelfish/__init__.py new file mode 100755 index 00000000..559705a2 --- /dev/null +++ b/lib/babelfish/__init__.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. 
+# +__title__ = 'babelfish' +__version__ = '0.5.5-dev' +__author__ = 'Antoine Bertin' +__license__ = 'BSD' +__copyright__ = 'Copyright 2015 the BabelFish authors' + +import sys + +if sys.version_info[0] >= 3: + basestr = str +else: + basestr = basestring + +from .converters import (LanguageConverter, LanguageReverseConverter, LanguageEquivalenceConverter, CountryConverter, + CountryReverseConverter) +from .country import country_converters, COUNTRIES, COUNTRY_MATRIX, Country +from .exceptions import Error, LanguageConvertError, LanguageReverseError, CountryConvertError, CountryReverseError +from .language import language_converters, LANGUAGES, LANGUAGE_MATRIX, Language +from .script import SCRIPTS, SCRIPT_MATRIX, Script diff --git a/lib/babelfish/converters/__init__.py b/lib/babelfish/converters/__init__.py new file mode 100755 index 00000000..5dc98ddf --- /dev/null +++ b/lib/babelfish/converters/__init__.py @@ -0,0 +1,289 @@ +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +import collections +import functools +from importlib import import_module + +# from pkg_resources import iter_entry_points, EntryPoint +from ..exceptions import LanguageConvertError, LanguageReverseError + + +# from https://github.com/kennethreitz/requests/blob/master/requests/structures.py +class CaseInsensitiveDict(collections.MutableMapping): + """A case-insensitive ``dict``-like object. + + Implements all methods and operations of + ``collections.MutableMapping`` as well as dict's ``copy``. Also + provides ``lower_items``. + + All keys are expected to be strings. The structure remembers the + case of the last key to be set, and ``iter(instance)``, + ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` + will contain case-sensitive keys. 
However, querying and contains + testing is case insensitive: + + cid = CaseInsensitiveDict() + cid['English'] = 'eng' + cid['ENGLISH'] == 'eng' # True + list(cid) == ['English'] # True + + If the constructor, ``.update``, or equality comparison + operations are given keys that have equal ``.lower()``s, the + behavior is undefined. + + """ + def __init__(self, data=None, **kwargs): + self._store = dict() + if data is None: + data = {} + self.update(data, **kwargs) + + def __setitem__(self, key, value): + # Use the lowercased key for lookups, but store the actual + # key alongside the value. + self._store[key.lower()] = (key, value) + + def __getitem__(self, key): + return self._store[key.lower()][1] + + def __delitem__(self, key): + del self._store[key.lower()] + + def __iter__(self): + return (casedkey for casedkey, mappedvalue in self._store.values()) + + def __len__(self): + return len(self._store) + + def lower_items(self): + """Like iteritems(), but with all lowercase keys.""" + return ( + (lowerkey, keyval[1]) + for (lowerkey, keyval) + in self._store.items() + ) + + def __eq__(self, other): + if isinstance(other, collections.Mapping): + other = CaseInsensitiveDict(other) + else: + return NotImplemented + # Compare insensitively + return dict(self.lower_items()) == dict(other.lower_items()) + + # Copy is required + def copy(self): + return CaseInsensitiveDict(self._store.values()) + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, dict(self.items())) + + +class LanguageConverter(object): + """A :class:`LanguageConverter` supports converting an alpha3 language code with an + alpha2 country code and a script code into a custom code + + .. 
attribute:: codes + + Set of possible custom codes + + """ + def convert(self, alpha3, country=None, script=None): + """Convert an alpha3 language code with an alpha2 country code and a script code + into a custom code + + :param string alpha3: ISO-639-3 language code + :param country: ISO-3166 country code, if any + :type country: string or None + :param script: ISO-15924 script code, if any + :type script: string or None + :return: the corresponding custom code + :rtype: string + :raise: :class:`~babelfish.exceptions.LanguageConvertError` + + """ + raise NotImplementedError + + +class LanguageReverseConverter(LanguageConverter): + """A :class:`LanguageConverter` able to reverse a custom code into an alpha3 + ISO-639-3 language code, alpha2 ISO-3166-1 country code and ISO-15924 script code + + """ + def reverse(self, code): + """Reverse a custom code into alpha3, country and script code + + :param string code: custom code to reverse + :return: the corresponding alpha3 ISO-639-3 language code, alpha2 ISO-3166-1 country code and ISO-15924 script code + :rtype: tuple + :raise: :class:`~babelfish.exceptions.LanguageReverseError` + + """ + raise NotImplementedError + + +class LanguageEquivalenceConverter(LanguageReverseConverter): + """A :class:`LanguageEquivalenceConverter` is a utility class that allows you to easily define a + :class:`LanguageReverseConverter` by only specifying the dict from alpha3 to their corresponding symbols. + + You must specify the dict of equivalence as a class variable named SYMBOLS. + + If you also set the class variable CASE_SENSITIVE to ``True`` then the reverse conversion function will be + case-sensitive (it is case-insensitive by default).
+ + Example:: + + class MyCodeConverter(babelfish.LanguageEquivalenceConverter): + CASE_SENSITIVE = True + SYMBOLS = {'fra': 'mycode1', 'eng': 'mycode2'} + + """ + CASE_SENSITIVE = False + + def __init__(self): + self.codes = set() + self.to_symbol = {} + if self.CASE_SENSITIVE: + self.from_symbol = {} + else: + self.from_symbol = CaseInsensitiveDict() + + for alpha3, symbol in self.SYMBOLS.items(): + self.to_symbol[alpha3] = symbol + self.from_symbol[symbol] = (alpha3, None, None) + self.codes.add(symbol) + + def convert(self, alpha3, country=None, script=None): + try: + return self.to_symbol[alpha3] + except KeyError: + raise LanguageConvertError(alpha3, country, script) + + def reverse(self, code): + try: + return self.from_symbol[code] + except KeyError: + raise LanguageReverseError(code) + + +class CountryConverter(object): + """A :class:`CountryConverter` supports converting an alpha2 country code + into a custom code + + .. attribute:: codes + + Set of possible custom codes + + """ + def convert(self, alpha2): + """Convert an alpha2 country code into a custom code + + :param string alpha2: ISO-3166-1 country code + :return: the corresponding custom code + :rtype: string + :raise: :class:`~babelfish.exceptions.CountryConvertError` + + """ + raise NotImplementedError + + +class CountryReverseConverter(CountryConverter): + """A :class:`CountryConverter` able to reverse a custom code into an alpha2 + ISO-3166-1 country code + + """ + def reverse(self, code): + """Reverse a custom code into alpha2 code + + :param string code: custom code to reverse + :return: the corresponding alpha2 ISO-3166-1 country code + :rtype: string + :raise: :class:`~babelfish.exceptions.CountryReverseError` + + """ + raise NotImplementedError + + +class ConverterManager(object): + """Manager for babelfish converters behaving like a dict with lazy loading + + Loading is done in this order: + + * Entry point converters + * Registered converters + * Internal converters + + ..
attribute:: entry_point + + The entry point where to look for converters + + .. attribute:: internal_converters + + Internal converters with entry point syntax + + """ + entry_point = '' + internal_converters = [] + + def __init__(self): + #: Registered converters with entry point syntax + self.registered_converters = [] + + #: Loaded converters + self.converters = {} + + def __getitem__(self, name): + """Get a converter, lazy loading it if necessary""" + if name in self.converters: + return self.converters[name] + # for ep in iter_entry_points(self.entry_point): + # if ep.name == name: + # self.converters[ep.name] = ep.load()() + # return self.converters[ep.name] + def parse(str): + import re + match = re.match('(?P<name>\w+) = (?P<module>[a-z0-9.]+):(?P<class>\w+)', str) + print match.groupdict() + return match.groupdict() + for ep in (parse(c) for c in self.registered_converters + self.internal_converters): + if ep.get('name') == name: + cl = getattr(import_module(ep.get('module')), ep.get('class')) + self.converters[ep.get('name')] = cl() + return self.converters[ep.get('name')] + raise KeyError(name) + + def __setitem__(self, name, converter): + """Load a converter""" + self.converters[name] = converter + + def __delitem__(self, name): + """Unload a converter""" + del self.converters[name] + + def __iter__(self): + """Iterator over loaded converters""" + return iter(self.converters) + + def register(self, entry_point): + """Register a converter + + :param string entry_point: converter to register (entry point syntax) + :raise: ValueError if already registered + + """ + if entry_point in self.registered_converters: + raise ValueError('Already registered') + self.registered_converters.insert(0, entry_point) + + def unregister(self, entry_point): + """Unregister a converter + + :param string entry_point: converter to unregister (entry point syntax) + + """ + self.registered_converters.remove(entry_point) + + def __contains__(self, name): + return name in 
self.converters diff --git a/lib/babelfish/converters/alpha2.py b/lib/babelfish/converters/alpha2.py new file mode 100755 index 00000000..aca973dd --- /dev/null +++ b/lib/babelfish/converters/alpha2.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . import LanguageEquivalenceConverter +from ..language import LANGUAGE_MATRIX + + +class Alpha2Converter(LanguageEquivalenceConverter): + CASE_SENSITIVE = True + SYMBOLS = {} + for iso_language in LANGUAGE_MATRIX: + if iso_language.alpha2: + SYMBOLS[iso_language.alpha3] = iso_language.alpha2 diff --git a/lib/babelfish/converters/alpha3b.py b/lib/babelfish/converters/alpha3b.py new file mode 100755 index 00000000..e90c5f5e --- /dev/null +++ b/lib/babelfish/converters/alpha3b.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . import LanguageEquivalenceConverter +from ..language import LANGUAGE_MATRIX + + +class Alpha3BConverter(LanguageEquivalenceConverter): + CASE_SENSITIVE = True + SYMBOLS = {} + for iso_language in LANGUAGE_MATRIX: + if iso_language.alpha3b: + SYMBOLS[iso_language.alpha3] = iso_language.alpha3b diff --git a/lib/babelfish/converters/alpha3t.py b/lib/babelfish/converters/alpha3t.py new file mode 100755 index 00000000..6de6e4c6 --- /dev/null +++ b/lib/babelfish/converters/alpha3t.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . 
import LanguageEquivalenceConverter +from ..language import LANGUAGE_MATRIX + + +class Alpha3TConverter(LanguageEquivalenceConverter): + CASE_SENSITIVE = True + SYMBOLS = {} + for iso_language in LANGUAGE_MATRIX: + if iso_language.alpha3t: + SYMBOLS[iso_language.alpha3] = iso_language.alpha3t diff --git a/lib/babelfish/converters/countryname.py b/lib/babelfish/converters/countryname.py new file mode 100755 index 00000000..ff36c878 --- /dev/null +++ b/lib/babelfish/converters/countryname.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . import CountryReverseConverter, CaseInsensitiveDict +from ..country import COUNTRY_MATRIX +from ..exceptions import CountryConvertError, CountryReverseError + + +class CountryNameConverter(CountryReverseConverter): + def __init__(self): + self.codes = set() + self.to_name = {} + self.from_name = CaseInsensitiveDict() + for country in COUNTRY_MATRIX: + self.codes.add(country.name) + self.to_name[country.alpha2] = country.name + self.from_name[country.name] = country.alpha2 + + def convert(self, alpha2): + if alpha2 not in self.to_name: + raise CountryConvertError(alpha2) + return self.to_name[alpha2] + + def reverse(self, name): + if name not in self.from_name: + raise CountryReverseError(name) + return self.from_name[name] diff --git a/lib/babelfish/converters/name.py b/lib/babelfish/converters/name.py new file mode 100755 index 00000000..8dd865b7 --- /dev/null +++ b/lib/babelfish/converters/name.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . 
import LanguageEquivalenceConverter +from ..language import LANGUAGE_MATRIX + + +class NameConverter(LanguageEquivalenceConverter): + CASE_SENSITIVE = False + SYMBOLS = {} + for iso_language in LANGUAGE_MATRIX: + if iso_language.name: + SYMBOLS[iso_language.alpha3] = iso_language.name diff --git a/lib/babelfish/converters/opensubtitles.py b/lib/babelfish/converters/opensubtitles.py new file mode 100755 index 00000000..101c40fd --- /dev/null +++ b/lib/babelfish/converters/opensubtitles.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . import LanguageReverseConverter, CaseInsensitiveDict +from ..exceptions import LanguageReverseError +from ..language import language_converters + + +class OpenSubtitlesConverter(LanguageReverseConverter): + def __init__(self): + self.alpha3b_converter = language_converters['alpha3b'] + self.alpha2_converter = language_converters['alpha2'] + self.to_opensubtitles = {('por', 'BR'): 'pob', ('gre', None): 'ell', ('srp', None): 'scc', ('srp', 'ME'): 'mne'} + self.from_opensubtitles = CaseInsensitiveDict({'pob': ('por', 'BR'), 'pb': ('por', 'BR'), 'ell': ('ell', None), + 'scc': ('srp', None), 'mne': ('srp', 'ME')}) + self.codes = (self.alpha2_converter.codes | self.alpha3b_converter.codes | set(['pob', 'pb', 'scc', 'mne'])) + + def convert(self, alpha3, country=None, script=None): + alpha3b = self.alpha3b_converter.convert(alpha3, country, script) + if (alpha3b, country) in self.to_opensubtitles: + return self.to_opensubtitles[(alpha3b, country)] + return alpha3b + + def reverse(self, opensubtitles): + if opensubtitles in self.from_opensubtitles: + return self.from_opensubtitles[opensubtitles] + for conv in [self.alpha3b_converter, self.alpha2_converter]: + try: + return conv.reverse(opensubtitles) + except 
LanguageReverseError: + pass + raise LanguageReverseError(opensubtitles) diff --git a/lib/babelfish/converters/scope.py b/lib/babelfish/converters/scope.py new file mode 100755 index 00000000..73540063 --- /dev/null +++ b/lib/babelfish/converters/scope.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . import LanguageConverter +from ..exceptions import LanguageConvertError +from ..language import LANGUAGE_MATRIX + + +class ScopeConverter(LanguageConverter): + FULLNAME = {'I': 'individual', 'M': 'macrolanguage', 'S': 'special'} + SYMBOLS = {} + for iso_language in LANGUAGE_MATRIX: + SYMBOLS[iso_language.alpha3] = iso_language.scope + codes = set(SYMBOLS.values()) + + def convert(self, alpha3, country=None, script=None): + if self.SYMBOLS[alpha3] in self.FULLNAME: + return self.FULLNAME[self.SYMBOLS[alpha3]] + raise LanguageConvertError(alpha3, country, script) diff --git a/lib/babelfish/converters/type.py b/lib/babelfish/converters/type.py new file mode 100755 index 00000000..3b7378c2 --- /dev/null +++ b/lib/babelfish/converters/type.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from . 
import LanguageConverter +from ..exceptions import LanguageConvertError +from ..language import LANGUAGE_MATRIX + + +class LanguageTypeConverter(LanguageConverter): + FULLNAME = {'A': 'ancient', 'C': 'constructed', 'E': 'extinct', 'H': 'historical', 'L': 'living', 'S': 'special'} + SYMBOLS = {} + for iso_language in LANGUAGE_MATRIX: + SYMBOLS[iso_language.alpha3] = iso_language.type + codes = set(SYMBOLS.values()) + + def convert(self, alpha3, country=None, script=None): + if self.SYMBOLS[alpha3] in self.FULLNAME: + return self.FULLNAME[self.SYMBOLS[alpha3]] + raise LanguageConvertError(alpha3, country, script) diff --git a/lib/babelfish/country.py b/lib/babelfish/country.py new file mode 100755 index 00000000..8e5bf80f --- /dev/null +++ b/lib/babelfish/country.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from collections import namedtuple +from functools import partial +# from pkg_resources import resource_stream # @UnresolvedImport +import os +from .converters import ConverterManager +from . 
import basestr + + +COUNTRIES = {} +COUNTRY_MATRIX = [] + +#: The namedtuple used in the :data:`COUNTRY_MATRIX` +IsoCountry = namedtuple('IsoCountry', ['name', 'alpha2']) + +f = open(os.path.join(os.path.dirname(__file__), 'data/iso-3166-1.txt')) +f.readline() +for l in f: + iso_country = IsoCountry(*l.decode('utf-8').strip().split(';')) + COUNTRIES[iso_country.alpha2] = iso_country.name + COUNTRY_MATRIX.append(iso_country) +f.close() + + +class CountryConverterManager(ConverterManager): + """:class:`~babelfish.converters.ConverterManager` for country converters""" + entry_point = 'babelfish.country_converters' + internal_converters = ['name = babelfish.converters.countryname:CountryNameConverter'] + +country_converters = CountryConverterManager() + + +class CountryMeta(type): + """The :class:`Country` metaclass + + Dynamically redirect :meth:`Country.frommycode` to :meth:`Country.fromcode` with the ``mycode`` `converter` + + """ + def __getattr__(cls, name): + if name.startswith('from'): + return partial(cls.fromcode, converter=name[4:]) + return type.__getattribute__(cls, name) + + +class Country(CountryMeta(str('CountryBase'), (object,), {})): + """A country on Earth + + A country is represented by a 2-letter code from the ISO-3166 standard + + :param string country: 2-letter ISO-3166 country code + + """ + def __init__(self, country): + if country not in COUNTRIES: + raise ValueError('%r is not a valid country' % country) + + #: ISO-3166 2-letter country code + self.alpha2 = country + + @classmethod + def fromcode(cls, code, converter): + """Create a :class:`Country` by its `code` using `converter` to + :meth:`~babelfish.converters.CountryReverseConverter.reverse` it + + :param string code: the code to reverse + :param string converter: name of the :class:`~babelfish.converters.CountryReverseConverter` to use + :return: the corresponding :class:`Country` instance + :rtype: :class:`Country` + + """ + return cls(country_converters[converter].reverse(code)) + + 
def __getstate__(self): + return self.alpha2 + + def __setstate__(self, state): + self.alpha2 = state + + def __getattr__(self, name): + try: + return country_converters[name].convert(self.alpha2) + except KeyError: + raise AttributeError(name) + + def __hash__(self): + return hash(self.alpha2) + + def __eq__(self, other): + if isinstance(other, basestr): + return str(self) == other + if not isinstance(other, Country): + return False + return self.alpha2 == other.alpha2 + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return '<Country [%s]>' % self + + def __str__(self): + return self.alpha2 diff --git a/lib/babelfish/data/iso-3166-1.txt b/lib/babelfish/data/iso-3166-1.txt new file mode 100755 index 00000000..da105072 --- /dev/null +++ b/lib/babelfish/data/iso-3166-1.txt @@ -0,0 +1,250 @@ +Country Name;ISO 3166-1-alpha-2 code +AFGHANISTAN;AF +ÅLAND ISLANDS;AX +ALBANIA;AL +ALGERIA;DZ +AMERICAN SAMOA;AS +ANDORRA;AD +ANGOLA;AO +ANGUILLA;AI +ANTARCTICA;AQ +ANTIGUA AND BARBUDA;AG +ARGENTINA;AR +ARMENIA;AM +ARUBA;AW +AUSTRALIA;AU +AUSTRIA;AT +AZERBAIJAN;AZ +BAHAMAS;BS +BAHRAIN;BH +BANGLADESH;BD +BARBADOS;BB +BELARUS;BY +BELGIUM;BE +BELIZE;BZ +BENIN;BJ +BERMUDA;BM +BHUTAN;BT +BOLIVIA, PLURINATIONAL STATE OF;BO +BONAIRE, SINT EUSTATIUS AND SABA;BQ +BOSNIA AND HERZEGOVINA;BA +BOTSWANA;BW +BOUVET ISLAND;BV +BRAZIL;BR +BRITISH INDIAN OCEAN TERRITORY;IO +BRUNEI DARUSSALAM;BN +BULGARIA;BG +BURKINA FASO;BF +BURUNDI;BI +CAMBODIA;KH +CAMEROON;CM +CANADA;CA +CAPE VERDE;CV +CAYMAN ISLANDS;KY +CENTRAL AFRICAN REPUBLIC;CF +CHAD;TD +CHILE;CL +CHINA;CN +CHRISTMAS ISLAND;CX +COCOS (KEELING) ISLANDS;CC +COLOMBIA;CO +COMOROS;KM +CONGO;CG +CONGO, THE DEMOCRATIC REPUBLIC OF THE;CD +COOK ISLANDS;CK +COSTA RICA;CR +CÔTE D'IVOIRE;CI +CROATIA;HR +CUBA;CU +CURAÇAO;CW +CYPRUS;CY +CZECH REPUBLIC;CZ +DENMARK;DK +DJIBOUTI;DJ +DOMINICA;DM +DOMINICAN REPUBLIC;DO +ECUADOR;EC +EGYPT;EG +EL SALVADOR;SV +EQUATORIAL GUINEA;GQ +ERITREA;ER +ESTONIA;EE +ETHIOPIA;ET +FALKLAND 
ISLANDS (MALVINAS);FK +FAROE ISLANDS;FO +FIJI;FJ +FINLAND;FI +FRANCE;FR +FRENCH GUIANA;GF +FRENCH POLYNESIA;PF +FRENCH SOUTHERN TERRITORIES;TF +GABON;GA +GAMBIA;GM +GEORGIA;GE +GERMANY;DE +GHANA;GH +GIBRALTAR;GI +GREECE;GR +GREENLAND;GL +GRENADA;GD +GUADELOUPE;GP +GUAM;GU +GUATEMALA;GT +GUERNSEY;GG +GUINEA;GN +GUINEA-BISSAU;GW +GUYANA;GY +HAITI;HT +HEARD ISLAND AND MCDONALD ISLANDS;HM +HOLY SEE (VATICAN CITY STATE);VA +HONDURAS;HN +HONG KONG;HK +HUNGARY;HU +ICELAND;IS +INDIA;IN +INDONESIA;ID +IRAN, ISLAMIC REPUBLIC OF;IR +IRAQ;IQ +IRELAND;IE +ISLE OF MAN;IM +ISRAEL;IL +ITALY;IT +JAMAICA;JM +JAPAN;JP +JERSEY;JE +JORDAN;JO +KAZAKHSTAN;KZ +KENYA;KE +KIRIBATI;KI +KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF;KP +KOREA, REPUBLIC OF;KR +KUWAIT;KW +KYRGYZSTAN;KG +LAO PEOPLE'S DEMOCRATIC REPUBLIC;LA +LATVIA;LV +LEBANON;LB +LESOTHO;LS +LIBERIA;LR +LIBYA;LY +LIECHTENSTEIN;LI +LITHUANIA;LT +LUXEMBOURG;LU +MACAO;MO +MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF;MK +MADAGASCAR;MG +MALAWI;MW +MALAYSIA;MY +MALDIVES;MV +MALI;ML +MALTA;MT +MARSHALL ISLANDS;MH +MARTINIQUE;MQ +MAURITANIA;MR +MAURITIUS;MU +MAYOTTE;YT +MEXICO;MX +MICRONESIA, FEDERATED STATES OF;FM +MOLDOVA, REPUBLIC OF;MD +MONACO;MC +MONGOLIA;MN +MONTENEGRO;ME +MONTSERRAT;MS +MOROCCO;MA +MOZAMBIQUE;MZ +MYANMAR;MM +NAMIBIA;NA +NAURU;NR +NEPAL;NP +NETHERLANDS;NL +NEW CALEDONIA;NC +NEW ZEALAND;NZ +NICARAGUA;NI +NIGER;NE +NIGERIA;NG +NIUE;NU +NORFOLK ISLAND;NF +NORTHERN MARIANA ISLANDS;MP +NORWAY;NO +OMAN;OM +PAKISTAN;PK +PALAU;PW +PALESTINE, STATE OF;PS +PANAMA;PA +PAPUA NEW GUINEA;PG +PARAGUAY;PY +PERU;PE +PHILIPPINES;PH +PITCAIRN;PN +POLAND;PL +PORTUGAL;PT +PUERTO RICO;PR +QATAR;QA +RÉUNION;RE +ROMANIA;RO +RUSSIAN FEDERATION;RU +RWANDA;RW +SAINT BARTHÉLEMY;BL +SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA;SH +SAINT KITTS AND NEVIS;KN +SAINT LUCIA;LC +SAINT MARTIN (FRENCH PART);MF +SAINT PIERRE AND MIQUELON;PM +SAINT VINCENT AND THE GRENADINES;VC +SAMOA;WS +SAN MARINO;SM +SAO TOME AND PRINCIPE;ST +SAUDI ARABIA;SA +SENEGAL;SN 
+SERBIA;RS +SEYCHELLES;SC +SIERRA LEONE;SL +SINGAPORE;SG +SINT MAARTEN (DUTCH PART);SX +SLOVAKIA;SK +SLOVENIA;SI +SOLOMON ISLANDS;SB +SOMALIA;SO +SOUTH AFRICA;ZA +SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS;GS +SOUTH SUDAN;SS +SPAIN;ES +SRI LANKA;LK +SUDAN;SD +SURINAME;SR +SVALBARD AND JAN MAYEN;SJ +SWAZILAND;SZ +SWEDEN;SE +SWITZERLAND;CH +SYRIAN ARAB REPUBLIC;SY +TAIWAN, PROVINCE OF CHINA;TW +TAJIKISTAN;TJ +TANZANIA, UNITED REPUBLIC OF;TZ +THAILAND;TH +TIMOR-LESTE;TL +TOGO;TG +TOKELAU;TK +TONGA;TO +TRINIDAD AND TOBAGO;TT +TUNISIA;TN +TURKEY;TR +TURKMENISTAN;TM +TURKS AND CAICOS ISLANDS;TC +TUVALU;TV +UGANDA;UG +UKRAINE;UA +UNITED ARAB EMIRATES;AE +UNITED KINGDOM;GB +UNITED STATES;US +UNITED STATES MINOR OUTLYING ISLANDS;UM +URUGUAY;UY +UZBEKISTAN;UZ +VANUATU;VU +VENEZUELA, BOLIVARIAN REPUBLIC OF;VE +VIET NAM;VN +VIRGIN ISLANDS, BRITISH;VG +VIRGIN ISLANDS, U.S.;VI +WALLIS AND FUTUNA;WF +WESTERN SAHARA;EH +YEMEN;YE +ZAMBIA;ZM +ZIMBABWE;ZW \ No newline at end of file diff --git a/lib/babelfish/data/iso-639-3.tab b/lib/babelfish/data/iso-639-3.tab new file mode 100755 index 00000000..f66d683b --- /dev/null +++ b/lib/babelfish/data/iso-639-3.tab @@ -0,0 +1,7875 @@ +Id Part2B Part2T Part1 Scope Language_Type Ref_Name Comment +aaa I L Ghotuo +aab I L Alumu-Tesu +aac I L Ari +aad I L Amal +aae I L Arbëreshë Albanian +aaf I L Aranadan +aag I L Ambrak +aah I L Abu' Arapesh +aai I L Arifama-Miniafia +aak I L Ankave +aal I L Afade +aam I L Aramanik +aan I L Anambé +aao I L Algerian Saharan Arabic +aap I L Pará Arára +aaq I E Eastern Abnaki +aar aar aar aa I L Afar +aas I L Aasáx +aat I L Arvanitika Albanian +aau I L Abau +aaw I L Solong +aax I L Mandobo Atas +aaz I L Amarasi +aba I L Abé +abb I L Bankon +abc I L Ambala Ayta +abd I L Manide +abe I E Western Abnaki +abf I L Abai Sungai +abg I L Abaga +abh I L Tajiki Arabic +abi I L Abidji +abj I E Aka-Bea +abk abk abk ab I L Abkhazian +abl I L Lampung Nyo +abm I L Abanyom +abn I L Abua +abo I L Abon +abp I L Abellen Ayta +abq 
I L Abaza +abr I L Abron +abs I L Ambonese Malay +abt I L Ambulas +abu I L Abure +abv I L Baharna Arabic +abw I L Pal +abx I L Inabaknon +aby I L Aneme Wake +abz I L Abui +aca I L Achagua +acb I L Áncá +acd I L Gikyode +ace ace ace I L Achinese +acf I L Saint Lucian Creole French +ach ach ach I L Acoli +aci I E Aka-Cari +ack I E Aka-Kora +acl I E Akar-Bale +acm I L Mesopotamian Arabic +acn I L Achang +acp I L Eastern Acipa +acq I L Ta'izzi-Adeni Arabic +acr I L Achi +acs I E Acroá +act I L Achterhoeks +acu I L Achuar-Shiwiar +acv I L Achumawi +acw I L Hijazi Arabic +acx I L Omani Arabic +acy I L Cypriot Arabic +acz I L Acheron +ada ada ada I L Adangme +adb I L Adabe +add I L Dzodinka +ade I L Adele +adf I L Dhofari Arabic +adg I L Andegerebinha +adh I L Adhola +adi I L Adi +adj I L Adioukrou +adl I L Galo +adn I L Adang +ado I L Abu +adp I L Adap +adq I L Adangbe +adr I L Adonara +ads I L Adamorobe Sign Language +adt I L Adnyamathanha +adu I L Aduge +adw I L Amundava +adx I L Amdo Tibetan +ady ady ady I L Adyghe +adz I L Adzera +aea I E Areba +aeb I L Tunisian Arabic +aec I L Saidi Arabic +aed I L Argentine Sign Language +aee I L Northeast Pashayi +aek I L Haeke +ael I L Ambele +aem I L Arem +aen I L Armenian Sign Language +aeq I L Aer +aer I L Eastern Arrernte +aes I E Alsea +aeu I L Akeu +aew I L Ambakich +aey I L Amele +aez I L Aeka +afb I L Gulf Arabic +afd I L Andai +afe I L Putukwam +afg I L Afghan Sign Language +afh afh afh I C Afrihili +afi I L Akrukay +afk I L Nanubae +afn I L Defaka +afo I L Eloyi +afp I L Tapei +afr afr afr af I L Afrikaans +afs I L Afro-Seminole Creole +aft I L Afitti +afu I L Awutu +afz I L Obokuitai +aga I E Aguano +agb I L Legbo +agc I L Agatu +agd I L Agarabi +age I L Angal +agf I L Arguni +agg I L Angor +agh I L Ngelima +agi I L Agariya +agj I L Argobba +agk I L Isarog Agta +agl I L Fembe +agm I L Angaataha +agn I L Agutaynen +ago I L Tainae +agq I L Aghem +agr I L Aguaruna +ags I L Esimbi +agt I L Central Cagayan Agta +agu I L 
Aguacateco +agv I L Remontado Dumagat +agw I L Kahua +agx I L Aghul +agy I L Southern Alta +agz I L Mt. Iriga Agta +aha I L Ahanta +ahb I L Axamb +ahg I L Qimant +ahh I L Aghu +ahi I L Tiagbamrin Aizi +ahk I L Akha +ahl I L Igo +ahm I L Mobumrin Aizi +ahn I L Àhàn +aho I E Ahom +ahp I L Aproumu Aizi +ahr I L Ahirani +ahs I L Ashe +aht I L Ahtena +aia I L Arosi +aib I L Ainu (China) +aic I L Ainbai +aid I E Alngith +aie I L Amara +aif I L Agi +aig I L Antigua and Barbuda Creole English +aih I L Ai-Cham +aii I L Assyrian Neo-Aramaic +aij I L Lishanid Noshan +aik I L Ake +ail I L Aimele +aim I L Aimol +ain ain ain I L Ainu (Japan) +aio I L Aiton +aip I L Burumakok +aiq I L Aimaq +air I L Airoran +ais I L Nataoran Amis +ait I E Arikem +aiw I L Aari +aix I L Aighon +aiy I L Ali +aja I L Aja (Sudan) +ajg I L Aja (Benin) +aji I L Ajië +ajn I L Andajin +ajp I L South Levantine Arabic +ajt I L Judeo-Tunisian Arabic +aju I L Judeo-Moroccan Arabic +ajw I E Ajawa +ajz I L Amri Karbi +aka aka aka ak M L Akan +akb I L Batak Angkola +akc I L Mpur +akd I L Ukpet-Ehom +ake I L Akawaio +akf I L Akpa +akg I L Anakalangu +akh I L Angal Heneng +aki I L Aiome +akj I E Aka-Jeru +akk akk akk I A Akkadian +akl I L Aklanon +akm I E Aka-Bo +ako I L Akurio +akp I L Siwu +akq I L Ak +akr I L Araki +aks I L Akaselem +akt I L Akolet +aku I L Akum +akv I L Akhvakh +akw I L Akwa +akx I E Aka-Kede +aky I E Aka-Kol +akz I L Alabama +ala I L Alago +alc I L Qawasqar +ald I L Alladian +ale ale ale I L Aleut +alf I L Alege +alh I L Alawa +ali I L Amaimon +alj I L Alangan +alk I L Alak +all I L Allar +alm I L Amblong +aln I L Gheg Albanian +alo I L Larike-Wakasihu +alp I L Alune +alq I L Algonquin +alr I L Alutor +als I L Tosk Albanian +alt alt alt I L Southern Altai +alu I L 'Are'are +alw I L Alaba-K’abeena +alx I L Amol +aly I L Alyawarr +alz I L Alur +ama I E Amanayé +amb I L Ambo +amc I L Amahuaca +ame I L Yanesha' +amf I L Hamer-Banna +amg I L Amurdak +amh amh amh am I L Amharic +ami I L Amis +amj I 
L Amdang +amk I L Ambai +aml I L War-Jaintia +amm I L Ama (Papua New Guinea) +amn I L Amanab +amo I L Amo +amp I L Alamblak +amq I L Amahai +amr I L Amarakaeri +ams I L Southern Amami-Oshima +amt I L Amto +amu I L Guerrero Amuzgo +amv I L Ambelau +amw I L Western Neo-Aramaic +amx I L Anmatyerre +amy I L Ami +amz I E Atampaya +ana I E Andaqui +anb I E Andoa +anc I L Ngas +and I L Ansus +ane I L Xârâcùù +anf I L Animere +ang ang ang I H Old English (ca. 450-1100) +anh I L Nend +ani I L Andi +anj I L Anor +ank I L Goemai +anl I L Anu-Hkongso Chin +anm I L Anal +ann I L Obolo +ano I L Andoque +anp anp anp I L Angika +anq I L Jarawa (India) +anr I L Andh +ans I E Anserma +ant I L Antakarinya +anu I L Anuak +anv I L Denya +anw I L Anaang +anx I L Andra-Hus +any I L Anyin +anz I L Anem +aoa I L Angolar +aob I L Abom +aoc I L Pemon +aod I L Andarum +aoe I L Angal Enen +aof I L Bragat +aog I L Angoram +aoh I E Arma +aoi I L Anindilyakwa +aoj I L Mufian +aok I L Arhö +aol I L Alor +aom I L Ömie +aon I L Bumbita Arapesh +aor I E Aore +aos I L Taikat +aot I L A'tong +aou I L A'ou +aox I L Atorada +aoz I L Uab Meto +apb I L Sa'a +apc I L North Levantine Arabic +apd I L Sudanese Arabic +ape I L Bukiyip +apf I L Pahanan Agta +apg I L Ampanang +aph I L Athpariya +api I L Apiaká +apj I L Jicarilla Apache +apk I L Kiowa Apache +apl I L Lipan Apache +apm I L Mescalero-Chiricahua Apache +apn I L Apinayé +apo I L Ambul +app I L Apma +apq I L A-Pucikwar +apr I L Arop-Lokep +aps I L Arop-Sissano +apt I L Apatani +apu I L Apurinã +apv I E Alapmunte +apw I L Western Apache +apx I L Aputai +apy I L Apalaí +apz I L Safeyoka +aqc I L Archi +aqd I L Ampari Dogon +aqg I L Arigidi +aqm I L Atohwaim +aqn I L Northern Alta +aqp I E Atakapa +aqr I L Arhâ +aqz I L Akuntsu +ara ara ara ar M L Arabic +arb I L Standard Arabic +arc arc arc I A Official Aramaic (700-300 BCE) +ard I E Arabana +are I L Western Arrarnta +arg arg arg an I L Aragonese +arh I L Arhuaco +ari I L Arikara +arj I E Arapaso +ark I 
L Arikapú +arl I L Arabela +arn arn arn I L Mapudungun +aro I L Araona +arp arp arp I L Arapaho +arq I L Algerian Arabic +arr I L Karo (Brazil) +ars I L Najdi Arabic +aru I E Aruá (Amazonas State) +arv I L Arbore +arw arw arw I L Arawak +arx I L Aruá (Rodonia State) +ary I L Moroccan Arabic +arz I L Egyptian Arabic +asa I L Asu (Tanzania) +asb I L Assiniboine +asc I L Casuarina Coast Asmat +asd I L Asas +ase I L American Sign Language +asf I L Australian Sign Language +asg I L Cishingini +ash I E Abishira +asi I L Buruwai +asj I L Sari +ask I L Ashkun +asl I L Asilulu +asm asm asm as I L Assamese +asn I L Xingú Asuriní +aso I L Dano +asp I L Algerian Sign Language +asq I L Austrian Sign Language +asr I L Asuri +ass I L Ipulo +ast ast ast I L Asturian +asu I L Tocantins Asurini +asv I L Asoa +asw I L Australian Aborigines Sign Language +asx I L Muratayak +asy I L Yaosakor Asmat +asz I L As +ata I L Pele-Ata +atb I L Zaiwa +atc I E Atsahuaca +atd I L Ata Manobo +ate I L Atemble +atg I L Ivbie North-Okpela-Arhe +ati I L Attié +atj I L Atikamekw +atk I L Ati +atl I L Mt. 
Iraya Agta +atm I L Ata +atn I L Ashtiani +ato I L Atong +atp I L Pudtol Atta +atq I L Aralle-Tabulahan +atr I L Waimiri-Atroari +ats I L Gros Ventre +att I L Pamplona Atta +atu I L Reel +atv I L Northern Altai +atw I L Atsugewi +atx I L Arutani +aty I L Aneityum +atz I L Arta +aua I L Asumboa +aub I L Alugu +auc I L Waorani +aud I L Anuta +aue I L =/Kx'au//'ein +aug I L Aguna +auh I L Aushi +aui I L Anuki +auj I L Awjilah +auk I L Heyo +aul I L Aulua +aum I L Asu (Nigeria) +aun I L Molmo One +auo I E Auyokawa +aup I L Makayam +auq I L Anus +aur I L Aruek +aut I L Austral +auu I L Auye +auw I L Awyi +aux I E Aurá +auy I L Awiyaana +auz I L Uzbeki Arabic +ava ava ava av I L Avaric +avb I L Avau +avd I L Alviri-Vidari +ave ave ave ae I A Avestan +avi I L Avikam +avk I C Kotava +avl I L Eastern Egyptian Bedawi Arabic +avm I E Angkamuthi +avn I L Avatime +avo I E Agavotaguerra +avs I E Aushiri +avt I L Au +avu I L Avokaya +avv I L Avá-Canoeiro +awa awa awa I L Awadhi +awb I L Awa (Papua New Guinea) +awc I L Cicipu +awe I L Awetí +awg I E Anguthimri +awh I L Awbono +awi I L Aekyom +awk I E Awabakal +awm I L Arawum +awn I L Awngi +awo I L Awak +awr I L Awera +aws I L South Awyu +awt I L Araweté +awu I L Central Awyu +awv I L Jair Awyu +aww I L Awun +awx I L Awara +awy I L Edera Awyu +axb I E Abipon +axe I E Ayerrerenge +axg I E Mato Grosso Arára +axk I L Yaka (Central African Republic) +axl I E Lower Southern Aranda +axm I H Middle Armenian +axx I L Xârâgurè +aya I L Awar +ayb I L Ayizo Gbe +ayc I L Southern Aymara +ayd I E Ayabadhu +aye I L Ayere +ayg I L Ginyanga +ayh I L Hadrami Arabic +ayi I L Leyigha +ayk I L Akuku +ayl I L Libyan Arabic +aym aym aym ay M L Aymara +ayn I L Sanaani Arabic +ayo I L Ayoreo +ayp I L North Mesopotamian Arabic +ayq I L Ayi (Papua New Guinea) +ayr I L Central Aymara +ays I L Sorsogon Ayta +ayt I L Magbukun Ayta +ayu I L Ayu +ayy I E Tayabas Ayta +ayz I L Mai Brat +aza I L Azha +azb I L South Azerbaijani +azd I L Eastern Durango Nahuatl 
+aze aze aze az M L Azerbaijani +azg I L San Pedro Amuzgos Amuzgo +azj I L North Azerbaijani +azm I L Ipalapa Amuzgo +azn I L Western Durango Nahuatl +azo I L Awing +azt I L Faire Atta +azz I L Highland Puebla Nahuatl +baa I L Babatana +bab I L Bainouk-Gunyuño +bac I L Badui +bae I E Baré +baf I L Nubaca +bag I L Tuki +bah I L Bahamas Creole English +baj I L Barakai +bak bak bak ba I L Bashkir +bal bal bal M L Baluchi +bam bam bam bm I L Bambara +ban ban ban I L Balinese +bao I L Waimaha +bap I L Bantawa +bar I L Bavarian +bas bas bas I L Basa (Cameroon) +bau I L Bada (Nigeria) +bav I L Vengo +baw I L Bambili-Bambui +bax I L Bamun +bay I L Batuley +bba I L Baatonum +bbb I L Barai +bbc I L Batak Toba +bbd I L Bau +bbe I L Bangba +bbf I L Baibai +bbg I L Barama +bbh I L Bugan +bbi I L Barombi +bbj I L Ghomálá' +bbk I L Babanki +bbl I L Bats +bbm I L Babango +bbn I L Uneapa +bbo I L Northern Bobo Madaré +bbp I L West Central Banda +bbq I L Bamali +bbr I L Girawa +bbs I L Bakpinka +bbt I L Mburku +bbu I L Kulung (Nigeria) +bbv I L Karnai +bbw I L Baba +bbx I L Bubia +bby I L Befang +bbz I L Babalia Creole Arabic +bca I L Central Bai +bcb I L Bainouk-Samik +bcc I L Southern Balochi +bcd I L North Babar +bce I L Bamenyam +bcf I L Bamu +bcg I L Baga Binari +bch I L Bariai +bci I L Baoulé +bcj I L Bardi +bck I L Bunaba +bcl I L Central Bikol +bcm I L Bannoni +bcn I L Bali (Nigeria) +bco I L Kaluli +bcp I L Bali (Democratic Republic of Congo) +bcq I L Bench +bcr I L Babine +bcs I L Kohumono +bct I L Bendi +bcu I L Awad Bing +bcv I L Shoo-Minda-Nye +bcw I L Bana +bcy I L Bacama +bcz I L Bainouk-Gunyaamolo +bda I L Bayot +bdb I L Basap +bdc I L Emberá-Baudó +bdd I L Bunama +bde I L Bade +bdf I L Biage +bdg I L Bonggi +bdh I L Baka (Sudan) +bdi I L Burun +bdj I L Bai +bdk I L Budukh +bdl I L Indonesian Bajau +bdm I L Buduma +bdn I L Baldemu +bdo I L Morom +bdp I L Bende +bdq I L Bahnar +bdr I L West Coast Bajau +bds I L Burunge +bdt I L Bokoto +bdu I L Oroko +bdv I L Bodo 
Parja +bdw I L Baham +bdx I L Budong-Budong +bdy I L Bandjalang +bdz I L Badeshi +bea I L Beaver +beb I L Bebele +bec I L Iceve-Maci +bed I L Bedoanas +bee I L Byangsi +bef I L Benabena +beg I L Belait +beh I L Biali +bei I L Bekati' +bej bej bej I L Beja +bek I L Bebeli +bel bel bel be I L Belarusian +bem bem bem I L Bemba (Zambia) +ben ben ben bn I L Bengali +beo I L Beami +bep I L Besoa +beq I L Beembe +bes I L Besme +bet I L Guiberoua Béte +beu I L Blagar +bev I L Daloa Bété +bew I L Betawi +bex I L Jur Modo +bey I L Beli (Papua New Guinea) +bez I L Bena (Tanzania) +bfa I L Bari +bfb I L Pauri Bareli +bfc I L Northern Bai +bfd I L Bafut +bfe I L Betaf +bff I L Bofi +bfg I L Busang Kayan +bfh I L Blafe +bfi I L British Sign Language +bfj I L Bafanji +bfk I L Ban Khor Sign Language +bfl I L Banda-Ndélé +bfm I L Mmen +bfn I L Bunak +bfo I L Malba Birifor +bfp I L Beba +bfq I L Badaga +bfr I L Bazigar +bfs I L Southern Bai +bft I L Balti +bfu I L Gahri +bfw I L Bondo +bfx I L Bantayanon +bfy I L Bagheli +bfz I L Mahasu Pahari +bga I L Gwamhi-Wuri +bgb I L Bobongko +bgc I L Haryanvi +bgd I L Rathwi Bareli +bge I L Bauria +bgf I L Bangandu +bgg I L Bugun +bgi I L Giangan +bgj I L Bangolan +bgk I L Bit +bgl I L Bo (Laos) +bgm I L Baga Mboteni +bgn I L Western Balochi +bgo I L Baga Koga +bgp I L Eastern Balochi +bgq I L Bagri +bgr I L Bawm Chin +bgs I L Tagabawa +bgt I L Bughotu +bgu I L Mbongno +bgv I L Warkay-Bipim +bgw I L Bhatri +bgx I L Balkan Gagauz Turkish +bgy I L Benggoi +bgz I L Banggai +bha I L Bharia +bhb I L Bhili +bhc I L Biga +bhd I L Bhadrawahi +bhe I L Bhaya +bhf I L Odiai +bhg I L Binandere +bhh I L Bukharic +bhi I L Bhilali +bhj I L Bahing +bhl I L Bimin +bhm I L Bathari +bhn I L Bohtan Neo-Aramaic +bho bho bho I L Bhojpuri +bhp I L Bima +bhq I L Tukang Besi South +bhr I L Bara Malagasy +bhs I L Buwal +bht I L Bhattiyali +bhu I L Bhunjia +bhv I L Bahau +bhw I L Biak +bhx I L Bhalay +bhy I L Bhele +bhz I L Bada (Indonesia) +bia I L Badimaya +bib I L 
Bissa +bic I L Bikaru +bid I L Bidiyo +bie I L Bepour +bif I L Biafada +big I L Biangai +bij I L Vaghat-Ya-Bijim-Legeri +bik bik bik M L Bikol +bil I L Bile +bim I L Bimoba +bin bin bin I L Bini +bio I L Nai +bip I L Bila +biq I L Bipi +bir I L Bisorio +bis bis bis bi I L Bislama +bit I L Berinomo +biu I L Biete +biv I L Southern Birifor +biw I L Kol (Cameroon) +bix I L Bijori +biy I L Birhor +biz I L Baloi +bja I L Budza +bjb I E Banggarla +bjc I L Bariji +bje I L Biao-Jiao Mien +bjf I L Barzani Jewish Neo-Aramaic +bjg I L Bidyogo +bjh I L Bahinemo +bji I L Burji +bjj I L Kanauji +bjk I L Barok +bjl I L Bulu (Papua New Guinea) +bjm I L Bajelani +bjn I L Banjar +bjo I L Mid-Southern Banda +bjp I L Fanamaket +bjr I L Binumarien +bjs I L Bajan +bjt I L Balanta-Ganja +bju I L Busuu +bjv I L Bedjond +bjw I L Bakwé +bjx I L Banao Itneg +bjy I E Bayali +bjz I L Baruga +bka I L Kyak +bkc I L Baka (Cameroon) +bkd I L Binukid +bkf I L Beeke +bkg I L Buraka +bkh I L Bakoko +bki I L Baki +bkj I L Pande +bkk I L Brokskat +bkl I L Berik +bkm I L Kom (Cameroon) +bkn I L Bukitan +bko I L Kwa' +bkp I L Boko (Democratic Republic of Congo) +bkq I L Bakairí +bkr I L Bakumpai +bks I L Northern Sorsoganon +bkt I L Boloki +bku I L Buhid +bkv I L Bekwarra +bkw I L Bekwel +bkx I L Baikeno +bky I L Bokyi +bkz I L Bungku +bla bla bla I L Siksika +blb I L Bilua +blc I L Bella Coola +bld I L Bolango +ble I L Balanta-Kentohe +blf I L Buol +blg I L Balau +blh I L Kuwaa +bli I L Bolia +blj I L Bolongan +blk I L Pa'o Karen +bll I E Biloxi +blm I L Beli (Sudan) +bln I L Southern Catanduanes Bikol +blo I L Anii +blp I L Blablanga +blq I L Baluan-Pam +blr I L Blang +bls I L Balaesang +blt I L Tai Dam +blv I L Bolo +blw I L Balangao +blx I L Mag-Indi Ayta +bly I L Notre +blz I L Balantak +bma I L Lame +bmb I L Bembe +bmc I L Biem +bmd I L Baga Manduri +bme I L Limassa +bmf I L Bom +bmg I L Bamwe +bmh I L Kein +bmi I L Bagirmi +bmj I L Bote-Majhi +bmk I L Ghayavi +bml I L Bomboli +bmm I L Northern 
Betsimisaraka Malagasy +bmn I E Bina (Papua New Guinea) +bmo I L Bambalang +bmp I L Bulgebi +bmq I L Bomu +bmr I L Muinane +bms I L Bilma Kanuri +bmt I L Biao Mon +bmu I L Somba-Siawari +bmv I L Bum +bmw I L Bomwali +bmx I L Baimak +bmy I L Bemba (Democratic Republic of Congo) +bmz I L Baramu +bna I L Bonerate +bnb I L Bookan +bnc M L Bontok +bnd I L Banda (Indonesia) +bne I L Bintauna +bnf I L Masiwang +bng I L Benga +bni I L Bangi +bnj I L Eastern Tawbuid +bnk I L Bierebo +bnl I L Boon +bnm I L Batanga +bnn I L Bunun +bno I L Bantoanon +bnp I L Bola +bnq I L Bantik +bnr I L Butmas-Tur +bns I L Bundeli +bnu I L Bentong +bnv I L Bonerif +bnw I L Bisis +bnx I L Bangubangu +bny I L Bintulu +bnz I L Beezen +boa I L Bora +bob I L Aweer +bod tib bod bo I L Tibetan +boe I L Mundabli +bof I L Bolon +bog I L Bamako Sign Language +boh I L Boma +boi I E Barbareño +boj I L Anjam +bok I L Bonjo +bol I L Bole +bom I L Berom +bon I L Bine +boo I L Tiemacèwè Bozo +bop I L Bonkiman +boq I L Bogaya +bor I L Borôro +bos bos bos bs I L Bosnian +bot I L Bongo +bou I L Bondei +bov I L Tuwuli +bow I E Rema +box I L Buamu +boy I L Bodo (Central African Republic) +boz I L Tiéyaxo Bozo +bpa I L Daakaka +bpb I E Barbacoas +bpd I L Banda-Banda +bpg I L Bonggo +bph I L Botlikh +bpi I L Bagupi +bpj I L Binji +bpk I L Orowe +bpl I L Broome Pearling Lugger Pidgin +bpm I L Biyom +bpn I L Dzao Min +bpo I L Anasi +bpp I L Kaure +bpq I L Banda Malay +bpr I L Koronadal Blaan +bps I L Sarangani Blaan +bpt I E Barrow Point +bpu I L Bongu +bpv I L Bian Marind +bpw I L Bo (Papua New Guinea) +bpx I L Palya Bareli +bpy I L Bishnupriya +bpz I L Bilba +bqa I L Tchumbuli +bqb I L Bagusa +bqc I L Boko (Benin) +bqd I L Bung +bqf I E Baga Kaloum +bqg I L Bago-Kusuntu +bqh I L Baima +bqi I L Bakhtiari +bqj I L Bandial +bqk I L Banda-Mbrès +bql I L Bilakura +bqm I L Wumboko +bqn I L Bulgarian Sign Language +bqo I L Balo +bqp I L Busa +bqq I L Biritai +bqr I L Burusu +bqs I L Bosngun +bqt I L Bamukumbit +bqu I L 
Boguru +bqv I L Koro Wachi +bqw I L Buru (Nigeria) +bqx I L Baangi +bqy I L Bengkala Sign Language +bqz I L Bakaka +bra bra bra I L Braj +brb I L Lave +brc I E Berbice Creole Dutch +brd I L Baraamu +bre bre bre br I L Breton +brf I L Bera +brg I L Baure +brh I L Brahui +bri I L Mokpwe +brj I L Bieria +brk I E Birked +brl I L Birwa +brm I L Barambu +brn I L Boruca +bro I L Brokkat +brp I L Barapasi +brq I L Breri +brr I L Birao +brs I L Baras +brt I L Bitare +bru I L Eastern Bru +brv I L Western Bru +brw I L Bellari +brx I L Bodo (India) +bry I L Burui +brz I L Bilbil +bsa I L Abinomn +bsb I L Brunei Bisaya +bsc I L Bassari +bse I L Wushi +bsf I L Bauchi +bsg I L Bashkardi +bsh I L Kati +bsi I L Bassossi +bsj I L Bangwinji +bsk I L Burushaski +bsl I E Basa-Gumna +bsm I L Busami +bsn I L Barasana-Eduria +bso I L Buso +bsp I L Baga Sitemu +bsq I L Bassa +bsr I L Bassa-Kontagora +bss I L Akoose +bst I L Basketo +bsu I L Bahonsuai +bsv I E Baga Sobané +bsw I L Baiso +bsx I L Yangkam +bsy I L Sabah Bisaya +bta I L Bata +btc I L Bati (Cameroon) +btd I L Batak Dairi +bte I E Gamo-Ningi +btf I L Birgit +btg I L Gagnoa Bété +bth I L Biatah Bidayuh +bti I L Burate +btj I L Bacanese Malay +btl I L Bhatola +btm I L Batak Mandailing +btn I L Ratagnon +bto I L Rinconada Bikol +btp I L Budibud +btq I L Batek +btr I L Baetora +bts I L Batak Simalungun +btt I L Bete-Bendi +btu I L Batu +btv I L Bateri +btw I L Butuanon +btx I L Batak Karo +bty I L Bobot +btz I L Batak Alas-Kluet +bua bua bua M L Buriat +bub I L Bua +buc I L Bushi +bud I L Ntcham +bue I E Beothuk +buf I L Bushoong +bug bug bug I L Buginese +buh I L Younuo Bunu +bui I L Bongili +buj I L Basa-Gurmana +buk I L Bugawac +bul bul bul bg I L Bulgarian +bum I L Bulu (Cameroon) +bun I L Sherbro +buo I L Terei +bup I L Busoa +buq I L Brem +bus I L Bokobaru +but I L Bungain +buu I L Budu +buv I L Bun +buw I L Bubi +bux I L Boghom +buy I L Bullom So +buz I L Bukwen +bva I L Barein +bvb I L Bube +bvc I L Baelelea +bvd I L Baeggu 
+bve I L Berau Malay +bvf I L Boor +bvg I L Bonkeng +bvh I L Bure +bvi I L Belanda Viri +bvj I L Baan +bvk I L Bukat +bvl I L Bolivian Sign Language +bvm I L Bamunka +bvn I L Buna +bvo I L Bolgo +bvp I L Bumang +bvq I L Birri +bvr I L Burarra +bvt I L Bati (Indonesia) +bvu I L Bukit Malay +bvv I E Baniva +bvw I L Boga +bvx I L Dibole +bvy I L Baybayanon +bvz I L Bauzi +bwa I L Bwatoo +bwb I L Namosi-Naitasiri-Serua +bwc I L Bwile +bwd I L Bwaidoka +bwe I L Bwe Karen +bwf I L Boselewa +bwg I L Barwe +bwh I L Bishuo +bwi I L Baniwa +bwj I L Láá Láá Bwamu +bwk I L Bauwaki +bwl I L Bwela +bwm I L Biwat +bwn I L Wunai Bunu +bwo I L Boro (Ethiopia) +bwp I L Mandobo Bawah +bwq I L Southern Bobo Madaré +bwr I L Bura-Pabir +bws I L Bomboma +bwt I L Bafaw-Balong +bwu I L Buli (Ghana) +bww I L Bwa +bwx I L Bu-Nao Bunu +bwy I L Cwi Bwamu +bwz I L Bwisi +bxa I L Tairaha +bxb I L Belanda Bor +bxc I L Molengue +bxd I L Pela +bxe I L Birale +bxf I L Bilur +bxg I L Bangala +bxh I L Buhutu +bxi I E Pirlatapa +bxj I L Bayungu +bxk I L Bukusu +bxl I L Jalkunan +bxm I L Mongolia Buriat +bxn I L Burduna +bxo I L Barikanchi +bxp I L Bebil +bxq I L Beele +bxr I L Russia Buriat +bxs I L Busam +bxu I L China Buriat +bxv I L Berakou +bxw I L Bankagooma +bxx I L Borna (Democratic Republic of Congo) +bxz I L Binahari +bya I L Batak +byb I L Bikya +byc I L Ubaghara +byd I L Benyadu' +bye I L Pouye +byf I L Bete +byg I E Baygo +byh I L Bhujel +byi I L Buyu +byj I L Bina (Nigeria) +byk I L Biao +byl I L Bayono +bym I L Bidyara +byn byn byn I L Bilin +byo I L Biyo +byp I L Bumaji +byq I E Basay +byr I L Baruya +bys I L Burak +byt I E Berti +byv I L Medumba +byw I L Belhariya +byx I L Qaqet +byy I L Buya +byz I L Banaro +bza I L Bandi +bzb I L Andio +bzc I L Southern Betsimisaraka Malagasy +bzd I L Bribri +bze I L Jenaama Bozo +bzf I L Boikin +bzg I L Babuza +bzh I L Mapos Buang +bzi I L Bisu +bzj I L Belize Kriol English +bzk I L Nicaragua Creole English +bzl I L Boano (Sulawesi) +bzm I L Bolondo 
+bzn I L Boano (Maluku) +bzo I L Bozaba +bzp I L Kemberano +bzq I L Buli (Indonesia) +bzr I E Biri +bzs I L Brazilian Sign Language +bzt I C Brithenig +bzu I L Burmeso +bzv I L Naami +bzw I L Basa (Nigeria) +bzx I L Kɛlɛngaxo Bozo +bzy I L Obanliku +bzz I L Evant +caa I L Chortí +cab I L Garifuna +cac I L Chuj +cad cad cad I L Caddo +cae I L Lehar +caf I L Southern Carrier +cag I L Nivaclé +cah I L Cahuarano +caj I E Chané +cak I L Kaqchikel +cal I L Carolinian +cam I L Cemuhî +can I L Chambri +cao I L Chácobo +cap I L Chipaya +caq I L Car Nicobarese +car car car I L Galibi Carib +cas I L Tsimané +cat cat cat ca I L Catalan +cav I L Cavineña +caw I L Callawalla +cax I L Chiquitano +cay I L Cayuga +caz I E Canichana +cbb I L Cabiyarí +cbc I L Carapana +cbd I L Carijona +cbe I E Chipiajes +cbg I L Chimila +cbh I E Cagua +cbi I L Chachi +cbj I L Ede Cabe +cbk I L Chavacano +cbl I L Bualkhaw Chin +cbn I L Nyahkur +cbo I L Izora +cbr I L Cashibo-Cacataibo +cbs I L Cashinahua +cbt I L Chayahuita +cbu I L Candoshi-Shapra +cbv I L Cacua +cbw I L Kinabalian +cby I L Carabayo +cca I E Cauca +ccc I L Chamicuro +ccd I L Cafundo Creole +cce I L Chopi +ccg I L Samba Daka +cch I L Atsam +ccj I L Kasanga +ccl I L Cutchi-Swahili +ccm I L Malaccan Creole Malay +cco I L Comaltepec Chinantec +ccp I L Chakma +ccr I E Cacaopera +cda I L Choni +cde I L Chenchu +cdf I L Chiru +cdg I L Chamari +cdh I L Chambeali +cdi I L Chodri +cdj I L Churahi +cdm I L Chepang +cdn I L Chaudangsi +cdo I L Min Dong Chinese +cdr I L Cinda-Regi-Tiyal +cds I L Chadian Sign Language +cdy I L Chadong +cdz I L Koda +cea I E Lower Chehalis +ceb ceb ceb I L Cebuano +ceg I L Chamacoco +cek I L Eastern Khumi Chin +cen I L Cen +ces cze ces cs I L Czech +cet I L Centúúm +cfa I L Dijim-Bwilim +cfd I L Cara +cfg I L Como Karim +cfm I L Falam Chin +cga I L Changriwa +cgc I L Kagayanen +cgg I L Chiga +cgk I L Chocangacakha +cha cha cha ch I L Chamorro +chb chb chb I E Chibcha +chc I E Catawba +chd I L Highland Oaxaca 
Chontal +che che che ce I L Chechen +chf I L Tabasco Chontal +chg chg chg I E Chagatai +chh I L Chinook +chj I L Ojitlán Chinantec +chk chk chk I L Chuukese +chl I L Cahuilla +chm chm chm M L Mari (Russia) +chn chn chn I L Chinook jargon +cho cho cho I L Choctaw +chp chp chp I L Chipewyan +chq I L Quiotepec Chinantec +chr chr chr I L Cherokee +cht I E Cholón +chu chu chu cu I A Church Slavic +chv chv chv cv I L Chuvash +chw I L Chuwabu +chx I L Chantyal +chy chy chy I L Cheyenne +chz I L Ozumacín Chinantec +cia I L Cia-Cia +cib I L Ci Gbe +cic I L Chickasaw +cid I E Chimariko +cie I L Cineni +cih I L Chinali +cik I L Chitkuli Kinnauri +cim I L Cimbrian +cin I L Cinta Larga +cip I L Chiapanec +cir I L Tiri +ciw I L Chippewa +ciy I L Chaima +cja I L Western Cham +cje I L Chru +cjh I E Upper Chehalis +cji I L Chamalal +cjk I L Chokwe +cjm I L Eastern Cham +cjn I L Chenapian +cjo I L Ashéninka Pajonal +cjp I L Cabécar +cjs I L Shor +cjv I L Chuave +cjy I L Jinyu Chinese +ckb I L Central Kurdish +ckh I L Chak +ckl I L Cibak +ckn I L Kaang Chin +cko I L Anufo +ckq I L Kajakse +ckr I L Kairak +cks I L Tayo +ckt I L Chukot +cku I L Koasati +ckv I L Kavalan +ckx I L Caka +cky I L Cakfem-Mushere +ckz I L Cakchiquel-Quiché Mixed Language +cla I L Ron +clc I L Chilcotin +cld I L Chaldean Neo-Aramaic +cle I L Lealao Chinantec +clh I L Chilisso +cli I L Chakali +clj I L Laitu Chin +clk I L Idu-Mishmi +cll I L Chala +clm I L Clallam +clo I L Lowland Oaxaca Chontal +clt I L Lautu Chin +clu I L Caluyanun +clw I L Chulym +cly I L Eastern Highland Chatino +cma I L Maa +cme I L Cerma +cmg I H Classical Mongolian +cmi I L Emberá-Chamí +cml I L Campalagian +cmm I E Michigamea +cmn I L Mandarin Chinese +cmo I L Central Mnong +cmr I L Mro-Khimi Chin +cms I A Messapic +cmt I L Camtho +cna I L Changthang +cnb I L Chinbon Chin +cnc I L Côông +cng I L Northern Qiang +cnh I L Haka Chin +cni I L Asháninka +cnk I L Khumi Chin +cnl I L Lalana Chinantec +cno I L Con +cns I L Central Asmat +cnt I L 
Tepetotutla Chinantec +cnu I L Chenoua +cnw I L Ngawn Chin +cnx I H Middle Cornish +coa I L Cocos Islands Malay +cob I E Chicomuceltec +coc I L Cocopa +cod I L Cocama-Cocamilla +coe I L Koreguaje +cof I L Colorado +cog I L Chong +coh I L Chonyi-Dzihana-Kauma +coj I E Cochimi +cok I L Santa Teresa Cora +col I L Columbia-Wenatchi +com I L Comanche +con I L Cofán +coo I L Comox +cop cop cop I E Coptic +coq I E Coquille +cor cor cor kw I L Cornish +cos cos cos co I L Corsican +cot I L Caquinte +cou I L Wamey +cov I L Cao Miao +cow I E Cowlitz +cox I L Nanti +coy I E Coyaima +coz I L Chochotec +cpa I L Palantla Chinantec +cpb I L Ucayali-Yurúa Ashéninka +cpc I L Ajyíninka Apurucayali +cpg I E Cappadocian Greek +cpi I L Chinese Pidgin English +cpn I L Cherepon +cpo I L Kpeego +cps I L Capiznon +cpu I L Pichis Ashéninka +cpx I L Pu-Xian Chinese +cpy I L South Ucayali Ashéninka +cqd I L Chuanqiandian Cluster Miao +cqu I L Chilean Quechua +cra I L Chara +crb I E Island Carib +crc I L Lonwolwol +crd I L Coeur d'Alene +cre cre cre cr M L Cree +crf I E Caramanta +crg I L Michif +crh crh crh I L Crimean Tatar +cri I L Sãotomense +crj I L Southern East Cree +crk I L Plains Cree +crl I L Northern East Cree +crm I L Moose Cree +crn I L El Nayar Cora +cro I L Crow +crq I L Iyo'wujwa Chorote +crr I E Carolina Algonquian +crs I L Seselwa Creole French +crt I L Iyojwa'ja Chorote +crv I L Chaura +crw I L Chrau +crx I L Carrier +cry I L Cori +crz I E Cruzeño +csa I L Chiltepec Chinantec +csb csb csb I L Kashubian +csc I L Catalan Sign Language +csd I L Chiangmai Sign Language +cse I L Czech Sign Language +csf I L Cuba Sign Language +csg I L Chilean Sign Language +csh I L Asho Chin +csi I E Coast Miwok +csj I L Songlai Chin +csk I L Jola-Kasa +csl I L Chinese Sign Language +csm I L Central Sierra Miwok +csn I L Colombian Sign Language +cso I L Sochiapam Chinantec +csq I L Croatia Sign Language +csr I L Costa Rican Sign Language +css I E Southern Ohlone +cst I L Northern Ohlone +csv I L 
Sumtu Chin +csw I L Swampy Cree +csy I L Siyin Chin +csz I L Coos +cta I L Tataltepec Chatino +ctc I L Chetco +ctd I L Tedim Chin +cte I L Tepinapa Chinantec +ctg I L Chittagonian +cth I L Thaiphum Chin +ctl I L Tlacoatzintepec Chinantec +ctm I E Chitimacha +ctn I L Chhintange +cto I L Emberá-Catío +ctp I L Western Highland Chatino +cts I L Northern Catanduanes Bikol +ctt I L Wayanad Chetti +ctu I L Chol +ctz I L Zacatepec Chatino +cua I L Cua +cub I L Cubeo +cuc I L Usila Chinantec +cug I L Cung +cuh I L Chuka +cui I L Cuiba +cuj I L Mashco Piro +cuk I L San Blas Kuna +cul I L Culina +cum I E Cumeral +cuo I E Cumanagoto +cup I E Cupeño +cuq I L Cun +cur I L Chhulung +cut I L Teutila Cuicatec +cuu I L Tai Ya +cuv I L Cuvok +cuw I L Chukwa +cux I L Tepeuxila Cuicatec +cvg I L Chug +cvn I L Valle Nacional Chinantec +cwa I L Kabwa +cwb I L Maindo +cwd I L Woods Cree +cwe I L Kwere +cwg I L Chewong +cwt I L Kuwaataay +cya I L Nopala Chatino +cyb I E Cayubaba +cym wel cym cy I L Welsh +cyo I L Cuyonon +czh I L Huizhou Chinese +czk I E Knaanic +czn I L Zenzontepec Chatino +czo I L Min Zhong Chinese +czt I L Zotung Chin +daa I L Dangaléat +dac I L Dambi +dad I L Marik +dae I L Duupa +dag I L Dagbani +dah I L Gwahatike +dai I L Day +daj I L Dar Fur Daju +dak dak dak I L Dakota +dal I L Dahalo +dam I L Damakawa +dan dan dan da I L Danish +dao I L Daai Chin +daq I L Dandami Maria +dar dar dar I L Dargwa +das I L Daho-Doo +dau I L Dar Sila Daju +dav I L Taita +daw I L Davawenyo +dax I L Dayi +daz I L Dao +dba I L Bangime +dbb I L Deno +dbd I L Dadiya +dbe I L Dabe +dbf I L Edopi +dbg I L Dogul Dom Dogon +dbi I L Doka +dbj I L Ida'an +dbl I L Dyirbal +dbm I L Duguri +dbn I L Duriankere +dbo I L Dulbu +dbp I L Duwai +dbq I L Daba +dbr I L Dabarre +dbt I L Ben Tey Dogon +dbu I L Bondum Dom Dogon +dbv I L Dungu +dbw I L Bankan Tey Dogon +dby I L Dibiyaso +dcc I L Deccan +dcr I E Negerhollands +dda I E Dadi Dadi +ddd I L Dongotono +dde I L Doondo +ddg I L Fataluku +ddi I L West 
Goodenough +ddj I L Jaru +ddn I L Dendi (Benin) +ddo I L Dido +ddr I E Dhudhuroa +dds I L Donno So Dogon +ddw I L Dawera-Daweloor +dec I L Dagik +ded I L Dedua +dee I L Dewoin +def I L Dezfuli +deg I L Degema +deh I L Dehwari +dei I L Demisa +dek I L Dek +del del del M L Delaware +dem I L Dem +den den den M L Slave (Athapascan) +dep I E Pidgin Delaware +deq I L Dendi (Central African Republic) +der I L Deori +des I L Desano +deu ger deu de I L German +dev I L Domung +dez I L Dengese +dga I L Southern Dagaare +dgb I L Bunoge Dogon +dgc I L Casiguran Dumagat Agta +dgd I L Dagaari Dioula +dge I L Degenan +dgg I L Doga +dgh I L Dghwede +dgi I L Northern Dagara +dgk I L Dagba +dgl I L Andaandi +dgn I E Dagoman +dgo I L Dogri (individual language) +dgr dgr dgr I L Dogrib +dgs I L Dogoso +dgt I E Ndra'ngith +dgu I L Degaru +dgw I E Daungwurrung +dgx I L Doghoro +dgz I L Daga +dhd I L Dhundari +dhg I L Djangu +dhi I L Dhimal +dhl I L Dhalandji +dhm I L Zemba +dhn I L Dhanki +dho I L Dhodia +dhr I L Dhargari +dhs I L Dhaiso +dhu I E Dhurga +dhv I L Dehu +dhw I L Dhanwar (Nepal) +dhx I L Dhungaloo +dia I L Dia +dib I L South Central Dinka +dic I L Lakota Dida +did I L Didinga +dif I E Dieri +dig I L Digo +dih I L Kumiai +dii I L Dimbong +dij I L Dai +dik I L Southwestern Dinka +dil I L Dilling +dim I L Dime +din din din M L Dinka +dio I L Dibo +dip I L Northeastern Dinka +diq I L Dimli (individual language) +dir I L Dirim +dis I L Dimasa +dit I E Dirari +diu I L Diriku +div div div dv I L Dhivehi +diw I L Northwestern Dinka +dix I L Dixon Reef +diy I L Diuwe +diz I L Ding +dja I E Djadjawurrung +djb I L Djinba +djc I L Dar Daju Daju +djd I L Djamindjung +dje I L Zarma +djf I E Djangun +dji I L Djinang +djj I L Djeebbana +djk I L Eastern Maroon Creole +djm I L Jamsay Dogon +djn I L Djauan +djo I L Jangkang +djr I L Djambarrpuyngu +dju I L Kapriman +djw I E Djawi +dka I L Dakpakha +dkk I L Dakka +dkr I L Kuijau +dks I L Southeastern Dinka +dkx I L Mazagway +dlg I L Dolgan +dlk 
I L Dahalik +dlm I E Dalmatian +dln I L Darlong +dma I L Duma +dmb I L Mombo Dogon +dmc I L Gavak +dmd I E Madhi Madhi +dme I L Dugwor +dmg I L Upper Kinabatangan +dmk I L Domaaki +dml I L Dameli +dmm I L Dama +dmo I L Kemedzung +dmr I L East Damar +dms I L Dampelas +dmu I L Dubu +dmv I L Dumpas +dmw I L Mudburra +dmx I L Dema +dmy I L Demta +dna I L Upper Grand Valley Dani +dnd I L Daonda +dne I L Ndendeule +dng I L Dungan +dni I L Lower Grand Valley Dani +dnj I L Dan +dnk I L Dengka +dnn I L Dzùùngoo +dnr I L Danaru +dnt I L Mid Grand Valley Dani +dnu I L Danau +dnv I L Danu +dnw I L Western Dani +dny I L Dení +doa I L Dom +dob I L Dobu +doc I L Northern Dong +doe I L Doe +dof I L Domu +doh I L Dong +doi doi doi M L Dogri (macrolanguage) +dok I L Dondo +dol I L Doso +don I L Toura (Papua New Guinea) +doo I L Dongo +dop I L Lukpa +doq I L Dominican Sign Language +dor I L Dori'o +dos I L Dogosé +dot I L Dass +dov I L Dombe +dow I L Doyayo +dox I L Bussa +doy I L Dompo +doz I L Dorze +dpp I L Papar +drb I L Dair +drc I L Minderico +drd I L Darmiya +dre I L Dolpo +drg I L Rungus +dri I L C'lela +drl I L Paakantyi +drn I L West Damar +dro I L Daro-Matu Melanau +drq I E Dura +drr I E Dororo +drs I L Gedeo +drt I L Drents +dru I L Rukai +dry I L Darai +dsb dsb dsb I L Lower Sorbian +dse I L Dutch Sign Language +dsh I L Daasanach +dsi I L Disa +dsl I L Danish Sign Language +dsn I L Dusner +dso I L Desiya +dsq I L Tadaksahak +dta I L Daur +dtb I L Labuk-Kinabatangan Kadazan +dtd I L Ditidaht +dth I E Adithinngithigh +dti I L Ana Tinga Dogon +dtk I L Tene Kan Dogon +dtm I L Tomo Kan Dogon +dto I L Tommo So Dogon +dtp I L Central Dusun +dtr I L Lotud +dts I L Toro So Dogon +dtt I L Toro Tegu Dogon +dtu I L Tebul Ure Dogon +dty I L Dotyali +dua dua dua I L Duala +dub I L Dubli +duc I L Duna +dud I L Hun-Saare +due I L Umiray Dumaget Agta +duf I L Dumbea +dug I L Duruma +duh I L Dungra Bhil +dui I L Dumun +duj I L Dhuwal +duk I L Uyajitaya +dul I L Alabat Island Agta +dum dum 
dum I H Middle Dutch (ca. 1050-1350) +dun I L Dusun Deyah +duo I L Dupaninan Agta +dup I L Duano +duq I L Dusun Malang +dur I L Dii +dus I L Dumi +duu I L Drung +duv I L Duvle +duw I L Dusun Witu +dux I L Duungooma +duy I E Dicamay Agta +duz I E Duli +dva I L Duau +dwa I L Diri +dwr I L Dawro +dws I C Dutton World Speedwords +dww I L Dawawa +dya I L Dyan +dyb I E Dyaberdyaber +dyd I E Dyugun +dyg I E Villa Viciosa Agta +dyi I L Djimini Senoufo +dym I L Yanda Dom Dogon +dyn I L Dyangadi +dyo I L Jola-Fonyi +dyu dyu dyu I L Dyula +dyy I L Dyaabugay +dza I L Tunzu +dzd I L Daza +dze I E Djiwarli +dzg I L Dazaga +dzl I L Dzalakha +dzn I L Dzando +dzo dzo dzo dz I L Dzongkha +eaa I E Karenggapa +ebg I L Ebughu +ebk I L Eastern Bontok +ebo I L Teke-Ebo +ebr I L Ebrié +ebu I L Embu +ecr I A Eteocretan +ecs I L Ecuadorian Sign Language +ecy I A Eteocypriot +eee I L E +efa I L Efai +efe I L Efe +efi efi efi I L Efik +ega I L Ega +egl I L Emilian +ego I L Eggon +egy egy egy I A Egyptian (Ancient) +ehu I L Ehueun +eip I L Eipomek +eit I L Eitiep +eiv I L Askopan +eja I L Ejamat +eka eka eka I L Ekajuk +ekc I E Eastern Karnic +eke I L Ekit +ekg I L Ekari +eki I L Eki +ekk I L Standard Estonian +ekl I L Kol (Bangladesh) +ekm I L Elip +eko I L Koti +ekp I L Ekpeye +ekr I L Yace +eky I L Eastern Kayah +ele I L Elepi +elh I L El Hugeirat +eli I E Nding +elk I L Elkei +ell gre ell el I L Modern Greek (1453-) +elm I L Eleme +elo I L El Molo +elu I L Elu +elx elx elx I A Elamite +ema I L Emai-Iuleha-Ora +emb I L Embaloh +eme I L Emerillon +emg I L Eastern Meohang +emi I L Mussau-Emira +emk I L Eastern Maninkakan +emm I E Mamulique +emn I L Eman +emo I E Emok +emp I L Northern Emberá +ems I L Pacific Gulf Yupik +emu I L Eastern Muria +emw I L Emplawas +emx I L Erromintxela +emy I E Epigraphic Mayan +ena I L Apali +enb I L Markweeta +enc I L En +end I L Ende +enf I L Forest Enets +eng eng eng en I L English +enh I L Tundra Enets +enm enm enm I H Middle English (1100-1500) +enn I L 
Engenni +eno I L Enggano +enq I L Enga +enr I L Emumu +enu I L Enu +env I L Enwan (Edu State) +enw I L Enwan (Akwa Ibom State) +eot I L Beti (Côte d'Ivoire) +epi I L Epie +epo epo epo eo I C Esperanto +era I L Eravallan +erg I L Sie +erh I L Eruwa +eri I L Ogea +erk I L South Efate +ero I L Horpa +err I E Erre +ers I L Ersu +ert I L Eritai +erw I L Erokwanas +ese I L Ese Ejja +esh I L Eshtehardi +esi I L North Alaskan Inupiatun +esk I L Northwest Alaska Inupiatun +esl I L Egypt Sign Language +esm I E Esuma +esn I L Salvadoran Sign Language +eso I L Estonian Sign Language +esq I E Esselen +ess I L Central Siberian Yupik +est est est et M L Estonian +esu I L Central Yupik +etb I L Etebi +etc I E Etchemin +eth I L Ethiopian Sign Language +etn I L Eton (Vanuatu) +eto I L Eton (Cameroon) +etr I L Edolo +ets I L Yekhee +ett I A Etruscan +etu I L Ejagham +etx I L Eten +etz I L Semimi +eus baq eus eu I L Basque +eve I L Even +evh I L Uvbie +evn I L Evenki +ewe ewe ewe ee I L Ewe +ewo ewo ewo I L Ewondo +ext I L Extremaduran +eya I E Eyak +eyo I L Keiyo +eza I L Ezaa +eze I L Uzekwe +faa I L Fasu +fab I L Fa d'Ambu +fad I L Wagi +faf I L Fagani +fag I L Finongan +fah I L Baissa Fali +fai I L Faiwol +faj I L Faita +fak I L Fang (Cameroon) +fal I L South Fali +fam I L Fam +fan fan fan I L Fang (Equatorial Guinea) +fao fao fao fo I L Faroese +fap I L Palor +far I L Fataleka +fas per fas fa M L Persian +fat fat fat I L Fanti +fau I L Fayu +fax I L Fala +fay I L Southwestern Fars +faz I L Northwestern Fars +fbl I L West Albay Bikol +fcs I L Quebec Sign Language +fer I L Feroge +ffi I L Foia Foia +ffm I L Maasina Fulfulde +fgr I L Fongoro +fia I L Nobiin +fie I L Fyer +fij fij fij fj I L Fijian +fil fil fil I L Filipino +fin fin fin fi I L Finnish +fip I L Fipa +fir I L Firan +fit I L Tornedalen Finnish +fiw I L Fiwaga +fkk I L Kirya-Konzəl +fkv I L Kven Finnish +fla I L Kalispel-Pend d'Oreille +flh I L Foau +fli I L Fali +fll I L North Fali +fln I E Flinders Island +flr I L 
Fuliiru +fly I L Tsotsitaal +fmp I L Fe'fe' +fmu I L Far Western Muria +fng I L Fanagalo +fni I L Fania +fod I L Foodo +foi I L Foi +fom I L Foma +fon fon fon I L Fon +for I L Fore +fos I E Siraya +fpe I L Fernando Po Creole English +fqs I L Fas +fra fre fra fr I L French +frc I L Cajun French +frd I L Fordata +frk I E Frankish +frm frm frm I H Middle French (ca. 1400-1600) +fro fro fro I H Old French (842-ca. 1400) +frp I L Arpitan +frq I L Forak +frr frr frr I L Northern Frisian +frs frs frs I L Eastern Frisian +frt I L Fortsenal +fry fry fry fy I L Western Frisian +fse I L Finnish Sign Language +fsl I L French Sign Language +fss I L Finland-Swedish Sign Language +fub I L Adamawa Fulfulde +fuc I L Pulaar +fud I L East Futuna +fue I L Borgu Fulfulde +fuf I L Pular +fuh I L Western Niger Fulfulde +fui I L Bagirmi Fulfulde +fuj I L Ko +ful ful ful ff M L Fulah +fum I L Fum +fun I L Fulniô +fuq I L Central-Eastern Niger Fulfulde +fur fur fur I L Friulian +fut I L Futuna-Aniwa +fuu I L Furu +fuv I L Nigerian Fulfulde +fuy I L Fuyug +fvr I L Fur +fwa I L Fwâi +fwe I L Fwe +gaa gaa gaa I L Ga +gab I L Gabri +gac I L Mixed Great Andamanese +gad I L Gaddang +gae I L Guarequena +gaf I L Gende +gag I L Gagauz +gah I L Alekano +gai I L Borei +gaj I L Gadsup +gak I L Gamkonora +gal I L Galolen +gam I L Kandawo +gan I L Gan Chinese +gao I L Gants +gap I L Gal +gaq I L Gata' +gar I L Galeya +gas I L Adiwasi Garasia +gat I L Kenati +gau I L Mudhili Gadaba +gaw I L Nobonob +gax I L Borana-Arsi-Guji Oromo +gay gay gay I L Gayo +gaz I L West Central Oromo +gba gba gba M L Gbaya (Central African Republic) +gbb I L Kaytetye +gbd I L Karadjeri +gbe I L Niksek +gbf I L Gaikundi +gbg I L Gbanziri +gbh I L Defi Gbe +gbi I L Galela +gbj I L Bodo Gadaba +gbk I L Gaddi +gbl I L Gamit +gbm I L Garhwali +gbn I L Mo'da +gbo I L Northern Grebo +gbp I L Gbaya-Bossangoa +gbq I L Gbaya-Bozoum +gbr I L Gbagyi +gbs I L Gbesi Gbe +gbu I L Gagadu +gbv I L Gbanu +gbw I L Gabi-Gabi +gbx I L Eastern Xwla 
Gbe +gby I L Gbari +gbz I L Zoroastrian Dari +gcc I L Mali +gcd I E Ganggalida +gce I E Galice +gcf I L Guadeloupean Creole French +gcl I L Grenadian Creole English +gcn I L Gaina +gcr I L Guianese Creole French +gct I L Colonia Tovar German +gda I L Gade Lohar +gdb I L Pottangi Ollar Gadaba +gdc I E Gugu Badhun +gdd I L Gedaged +gde I L Gude +gdf I L Guduf-Gava +gdg I L Ga'dang +gdh I L Gadjerawang +gdi I L Gundi +gdj I L Gurdjar +gdk I L Gadang +gdl I L Dirasha +gdm I L Laal +gdn I L Umanakaina +gdo I L Ghodoberi +gdq I L Mehri +gdr I L Wipi +gds I L Ghandruk Sign Language +gdt I E Kungardutyi +gdu I L Gudu +gdx I L Godwari +gea I L Geruma +geb I L Kire +gec I L Gboloo Grebo +ged I L Gade +geg I L Gengle +geh I L Hutterite German +gei I L Gebe +gej I L Gen +gek I L Yiwom +gel I L ut-Ma'in +geq I L Geme +ges I L Geser-Gorom +gew I L Gera +gex I L Garre +gey I L Enya +gez gez gez I A Geez +gfk I L Patpatar +gft I E Gafat +gfx I L Mangetti Dune !Xung +gga I L Gao +ggb I L Gbii +ggd I E Gugadj +gge I L Guragone +ggg I L Gurgula +ggk I E Kungarakany +ggl I L Ganglau +ggm I E Gugu Mini +ggn I L Eastern Gurung +ggo I L Southern Gondi +ggt I L Gitua +ggu I L Gagu +ggw I L Gogodala +gha I L Ghadamès +ghc I E Hiberno-Scottish Gaelic +ghe I L Southern Ghale +ghh I L Northern Ghale +ghk I L Geko Karen +ghl I L Ghulfan +ghn I L Ghanongga +gho I E Ghomara +ghr I L Ghera +ghs I L Guhu-Samane +ght I L Kuke +gia I L Kitja +gib I L Gibanawa +gic I L Gail +gid I L Gidar +gig I L Goaria +gih I L Githabul +gil gil gil I L Gilbertese +gim I L Gimi (Eastern Highlands) +gin I L Hinukh +gip I L Gimi (West New Britain) +giq I L Green Gelao +gir I L Red Gelao +gis I L North Giziga +git I L Gitxsan +giu I L Mulao +giw I L White Gelao +gix I L Gilima +giy I L Giyug +giz I L South Giziga +gji I L Geji +gjk I L Kachi Koli +gjm I E Gunditjmara +gjn I L Gonja +gju I L Gujari +gka I L Guya +gke I L Ndai +gkn I L Gokana +gko I E Kok-Nar +gkp I L Guinea Kpelle +gla gla gla gd I L Scottish Gaelic 
+glc I L Bon Gula +gld I L Nanai +gle gle gle ga I L Irish +glg glg glg gl I L Galician +glh I L Northwest Pashayi +gli I E Guliguli +glj I L Gula Iro +glk I L Gilaki +gll I E Garlali +glo I L Galambu +glr I L Glaro-Twabo +glu I L Gula (Chad) +glv glv glv gv I L Manx +glw I L Glavda +gly I E Gule +gma I E Gambera +gmb I L Gula'alaa +gmd I L Mághdì +gmh gmh gmh I H Middle High German (ca. 1050-1500) +gml I H Middle Low German +gmm I L Gbaya-Mbodomo +gmn I L Gimnime +gmu I L Gumalu +gmv I L Gamo +gmx I L Magoma +gmy I A Mycenaean Greek +gmz I L Mgbolizhia +gna I L Kaansa +gnb I L Gangte +gnc I E Guanche +gnd I L Zulgo-Gemzek +gne I L Ganang +gng I L Ngangam +gnh I L Lere +gni I L Gooniyandi +gnk I L //Gana +gnl I E Gangulu +gnm I L Ginuman +gnn I L Gumatj +gno I L Northern Gondi +gnq I L Gana +gnr I E Gureng Gureng +gnt I L Guntai +gnu I L Gnau +gnw I L Western Bolivian Guaraní +gnz I L Ganzi +goa I L Guro +gob I L Playero +goc I L Gorakor +god I L Godié +goe I L Gongduk +gof I L Gofa +gog I L Gogo +goh goh goh I H Old High German (ca. 
750-1050) +goi I L Gobasi +goj I L Gowlan +gok I L Gowli +gol I L Gola +gom I L Goan Konkani +gon gon gon M L Gondi +goo I L Gone Dau +gop I L Yeretuar +goq I L Gorap +gor gor gor I L Gorontalo +gos I L Gronings +got got got I A Gothic +gou I L Gavar +gow I L Gorowa +gox I L Gobu +goy I L Goundo +goz I L Gozarkhani +gpa I L Gupa-Abawa +gpe I L Ghanaian Pidgin English +gpn I L Taiap +gqa I L Ga'anda +gqi I L Guiqiong +gqn I E Guana (Brazil) +gqr I L Gor +gqu I L Qau +gra I L Rajput Garasia +grb grb grb M L Grebo +grc grc grc I H Ancient Greek (to 1453) +grd I L Guruntum-Mbaaru +grg I L Madi +grh I L Gbiri-Niragu +gri I L Ghari +grj I L Southern Grebo +grm I L Kota Marudu Talantang +grn grn grn gn M L Guarani +gro I L Groma +grq I L Gorovu +grr I L Taznatit +grs I L Gresi +grt I L Garo +gru I L Kistane +grv I L Central Grebo +grw I L Gweda +grx I L Guriaso +gry I L Barclayville Grebo +grz I L Guramalum +gse I L Ghanaian Sign Language +gsg I L German Sign Language +gsl I L Gusilay +gsm I L Guatemalan Sign Language +gsn I L Gusan +gso I L Southwest Gbaya +gsp I L Wasembo +gss I L Greek Sign Language +gsw gsw gsw I L Swiss German +gta I L Guató +gti I L Gbati-ri +gtu I E Aghu-Tharnggala +gua I L Shiki +gub I L Guajajára +guc I L Wayuu +gud I L Yocoboué Dida +gue I L Gurinji +guf I L Gupapuyngu +gug I L Paraguayan Guaraní +guh I L Guahibo +gui I L Eastern Bolivian Guaraní +guj guj guj gu I L Gujarati +guk I L Gumuz +gul I L Sea Island Creole English +gum I L Guambiano +gun I L Mbyá Guaraní +guo I L Guayabero +gup I L Gunwinggu +guq I L Aché +gur I L Farefare +gus I L Guinean Sign Language +gut I L Maléku Jaíka +guu I L Yanomamö +guv I E Gey +guw I L Gun +gux I L Gourmanchéma +guz I L Gusii +gva I L Guana (Paraguay) +gvc I L Guanano +gve I L Duwet +gvf I L Golin +gvj I L Guajá +gvl I L Gulay +gvm I L Gurmana +gvn I L Kuku-Yalanji +gvo I L Gavião Do Jiparaná +gvp I L Pará Gavião +gvr I L Western Gurung +gvs I L Gumawana +gvy I E Guyani +gwa I L Mbato +gwb I L Gwa +gwc I L 
Kalami +gwd I L Gawwada +gwe I L Gweno +gwf I L Gowro +gwg I L Moo +gwi gwi gwi I L Gwichʼin +gwj I L /Gwi +gwm I E Awngthim +gwn I L Gwandara +gwr I L Gwere +gwt I L Gawar-Bati +gwu I E Guwamu +gww I L Kwini +gwx I L Gua +gxx I L Wè Southern +gya I L Northwest Gbaya +gyb I L Garus +gyd I L Kayardild +gye I L Gyem +gyf I E Gungabula +gyg I L Gbayi +gyi I L Gyele +gyl I L Gayil +gym I L Ngäbere +gyn I L Guyanese Creole English +gyr I L Guarayu +gyy I E Gunya +gza I L Ganza +gzi I L Gazi +gzn I L Gane +haa I L Han +hab I L Hanoi Sign Language +hac I L Gurani +had I L Hatam +hae I L Eastern Oromo +haf I L Haiphong Sign Language +hag I L Hanga +hah I L Hahon +hai hai hai M L Haida +haj I L Hajong +hak I L Hakka Chinese +hal I L Halang +ham I L Hewa +han I L Hangaza +hao I L Hakö +hap I L Hupla +haq I L Ha +har I L Harari +has I L Haisla +hat hat hat ht I L Haitian +hau hau hau ha I L Hausa +hav I L Havu +haw haw haw I L Hawaiian +hax I L Southern Haida +hay I L Haya +haz I L Hazaragi +hba I L Hamba +hbb I L Huba +hbn I L Heiban +hbo I H Ancient Hebrew +hbs sh M L Serbo-Croatian Code element for 639-1 has been deprecated +hbu I L Habu +hca I L Andaman Creole Hindi +hch I L Huichol +hdn I L Northern Haida +hds I L Honduras Sign Language +hdy I L Hadiyya +hea I L Northern Qiandong Miao +heb heb heb he I L Hebrew +hed I L Herdé +heg I L Helong +heh I L Hehe +hei I L Heiltsuk +hem I L Hemba +her her her hz I L Herero +hgm I L Hai//om +hgw I L Haigwai +hhi I L Hoia Hoia +hhr I L Kerak +hhy I L Hoyahoya +hia I L Lamang +hib I E Hibito +hid I L Hidatsa +hif I L Fiji Hindi +hig I L Kamwe +hih I L Pamosu +hii I L Hinduri +hij I L Hijuk +hik I L Seit-Kaitetu +hil hil hil I L Hiligaynon +hin hin hin hi I L Hindi +hio I L Tsoa +hir I L Himarimã +hit hit hit I A Hittite +hiw I L Hiw +hix I L Hixkaryána +hji I L Haji +hka I L Kahe +hke I L Hunde +hkk I L Hunjara-Kaina Ke +hks I L Hong Kong Sign Language +hla I L Halia +hlb I L Halbi +hld I L Halang Doan +hle I L Hlersu +hlt I L Matu 
Chin +hlu I A Hieroglyphic Luwian +hma I L Southern Mashan Hmong +hmb I L Humburi Senni Songhay +hmc I L Central Huishui Hmong +hmd I L Large Flowery Miao +hme I L Eastern Huishui Hmong +hmf I L Hmong Don +hmg I L Southwestern Guiyang Hmong +hmh I L Southwestern Huishui Hmong +hmi I L Northern Huishui Hmong +hmj I L Ge +hmk I E Maek +hml I L Luopohe Hmong +hmm I L Central Mashan Hmong +hmn hmn hmn M L Hmong +hmo hmo hmo ho I L Hiri Motu +hmp I L Northern Mashan Hmong +hmq I L Eastern Qiandong Miao +hmr I L Hmar +hms I L Southern Qiandong Miao +hmt I L Hamtai +hmu I L Hamap +hmv I L Hmong Dô +hmw I L Western Mashan Hmong +hmy I L Southern Guiyang Hmong +hmz I L Hmong Shua +hna I L Mina (Cameroon) +hnd I L Southern Hindko +hne I L Chhattisgarhi +hnh I L //Ani +hni I L Hani +hnj I L Hmong Njua +hnn I L Hanunoo +hno I L Northern Hindko +hns I L Caribbean Hindustani +hnu I L Hung +hoa I L Hoava +hob I L Mari (Madang Province) +hoc I L Ho +hod I E Holma +hoe I L Horom +hoh I L Hobyót +hoi I L Holikachuk +hoj I L Hadothi +hol I L Holu +hom I E Homa +hoo I L Holoholo +hop I L Hopi +hor I E Horo +hos I L Ho Chi Minh City Sign Language +hot I L Hote +hov I L Hovongan +how I L Honi +hoy I L Holiya +hoz I L Hozo +hpo I L Hpon +hps I L Hawai'i Pidgin Sign Language +hra I L Hrangkhol +hrc I L Niwer Mil +hre I L Hre +hrk I L Haruku +hrm I L Horned Miao +hro I L Haroi +hrp I E Nhirrpi +hrt I L Hértevin +hru I L Hruso +hrv hrv hrv hr I L Croatian +hrw I L Warwar Feni +hrx I L Hunsrik +hrz I L Harzani +hsb hsb hsb I L Upper Sorbian +hsh I L Hungarian Sign Language +hsl I L Hausa Sign Language +hsn I L Xiang Chinese +hss I L Harsusi +hti I L Hoti +hto I L Minica Huitoto +hts I L Hadza +htu I L Hitu +htx I A Middle Hittite +hub I L Huambisa +huc I L =/Hua +hud I L Huaulu +hue I L San Francisco Del Mar Huave +huf I L Humene +hug I L Huachipaeri +huh I L Huilliche +hui I L Huli +huj I L Northern Guiyang Hmong +huk I L Hulung +hul I L Hula +hum I L Hungana +hun hun hun hu I L Hungarian 
+huo I L Hu +hup hup hup I L Hupa +huq I L Tsat +hur I L Halkomelem +hus I L Huastec +hut I L Humla +huu I L Murui Huitoto +huv I L San Mateo Del Mar Huave +huw I E Hukumina +hux I L Nüpode Huitoto +huy I L Hulaulá +huz I L Hunzib +hvc I L Haitian Vodoun Culture Language +hve I L San Dionisio Del Mar Huave +hvk I L Haveke +hvn I L Sabu +hvv I L Santa María Del Mar Huave +hwa I L Wané +hwc I L Hawai'i Creole English +hwo I L Hwana +hya I L Hya +hye arm hye hy I L Armenian +iai I L Iaai +ian I L Iatmul +iap I L Iapama +iar I L Purari +iba iba iba I L Iban +ibb I L Ibibio +ibd I L Iwaidja +ibe I L Akpes +ibg I L Ibanag +ibl I L Ibaloi +ibm I L Agoi +ibn I L Ibino +ibo ibo ibo ig I L Igbo +ibr I L Ibuoro +ibu I L Ibu +iby I L Ibani +ica I L Ede Ica +ich I L Etkywan +icl I L Icelandic Sign Language +icr I L Islander Creole English +ida I L Idakho-Isukha-Tiriki +idb I L Indo-Portuguese +idc I L Idon +idd I L Ede Idaca +ide I L Idere +idi I L Idi +ido ido ido io I C Ido +idr I L Indri +ids I L Idesa +idt I L Idaté +idu I L Idoma +ifa I L Amganad Ifugao +ifb I L Batad Ifugao +ife I L Ifè +iff I E Ifo +ifk I L Tuwali Ifugao +ifm I L Teke-Fuumu +ifu I L Mayoyao Ifugao +ify I L Keley-I Kallahan +igb I L Ebira +ige I L Igede +igg I L Igana +igl I L Igala +igm I L Kanggape +ign I L Ignaciano +igo I L Isebe +igs I C Interglossa +igw I L Igwe +ihb I L Iha Based Pidgin +ihi I L Ihievbe +ihp I L Iha +ihw I E Bidhawal +iii iii iii ii I L Sichuan Yi +iin I E Thiin +ijc I L Izon +ije I L Biseni +ijj I L Ede Ije +ijn I L Kalabari +ijs I L Southeast Ijo +ike I L Eastern Canadian Inuktitut +iki I L Iko +ikk I L Ika +ikl I L Ikulu +iko I L Olulumo-Ikom +ikp I L Ikpeshi +ikr I E Ikaranggal +ikt I L Inuinnaqtun +iku iku iku iu M L Inuktitut +ikv I L Iku-Gora-Ankwa +ikw I L Ikwere +ikx I L Ik +ikz I L Ikizu +ila I L Ile Ape +ilb I L Ila +ile ile ile ie I C Interlingue +ilg I E Garig-Ilgar +ili I L Ili Turki +ilk I L Ilongot +ill I L Iranun +ilo ilo ilo I L Iloko +ils I L International Sign 
+ilu I L Ili'uun +ilv I L Ilue +ima I L Mala Malasar +ime I L Imeraguen +imi I L Anamgura +iml I E Miluk +imn I L Imonda +imo I L Imbongu +imr I L Imroing +ims I A Marsian +imy I A Milyan +ina ina ina ia I C Interlingua (International Auxiliary Language Association) +inb I L Inga +ind ind ind id I L Indonesian +ing I L Degexit'an +inh inh inh I L Ingush +inj I L Jungle Inga +inl I L Indonesian Sign Language +inm I A Minaean +inn I L Isinai +ino I L Inoke-Yate +inp I L Iñapari +ins I L Indian Sign Language +int I L Intha +inz I E Ineseño +ior I L Inor +iou I L Tuma-Irumu +iow I E Iowa-Oto +ipi I L Ipili +ipk ipk ipk ik M L Inupiaq +ipo I L Ipiko +iqu I L Iquito +iqw I L Ikwo +ire I L Iresim +irh I L Irarutu +iri I L Irigwe +irk I L Iraqw +irn I L Irántxe +irr I L Ir +iru I L Irula +irx I L Kamberau +iry I L Iraya +isa I L Isabi +isc I L Isconahua +isd I L Isnag +ise I L Italian Sign Language +isg I L Irish Sign Language +ish I L Esan +isi I L Nkem-Nkum +isk I L Ishkashimi +isl ice isl is I L Icelandic +ism I L Masimasi +isn I L Isanzu +iso I L Isoko +isr I L Israeli Sign Language +ist I L Istriot +isu I L Isu (Menchum Division) +ita ita ita it I L Italian +itb I L Binongan Itneg +ite I E Itene +iti I L Inlaod Itneg +itk I L Judeo-Italian +itl I L Itelmen +itm I L Itu Mbon Uzo +ito I L Itonama +itr I L Iteri +its I L Isekiri +itt I L Maeng Itneg +itv I L Itawit +itw I L Ito +itx I L Itik +ity I L Moyadan Itneg +itz I L Itzá +ium I L Iu Mien +ivb I L Ibatan +ivv I L Ivatan +iwk I L I-Wak +iwm I L Iwam +iwo I L Iwur +iws I L Sepik Iwam +ixc I L Ixcatec +ixl I L Ixil +iya I L Iyayu +iyo I L Mesaka +iyx I L Yaka (Congo) +izh I L Ingrian +izr I L Izere +izz I L Izii +jaa I L Jamamadí +jab I L Hyam +jac I L Popti' +jad I L Jahanka +jae I L Yabem +jaf I L Jara +jah I L Jah Hut +jaj I L Zazao +jak I L Jakun +jal I L Yalahatan +jam I L Jamaican Creole English +jan I E Jandai +jao I L Yanyuwa +jaq I L Yaqay +jas I L New Caledonian Javanese +jat I L Jakati +jau I L Yaur +jav 
jav jav jv I L Javanese +jax I L Jambi Malay +jay I L Yan-nhangu +jaz I L Jawe +jbe I L Judeo-Berber +jbi I E Badjiri +jbj I L Arandai +jbk I L Barikewa +jbn I L Nafusi +jbo jbo jbo I C Lojban +jbr I L Jofotek-Bromnya +jbt I L Jabutí +jbu I L Jukun Takum +jbw I E Yawijibaya +jcs I L Jamaican Country Sign Language +jct I L Krymchak +jda I L Jad +jdg I L Jadgali +jdt I L Judeo-Tat +jeb I L Jebero +jee I L Jerung +jeg I L Jeng +jeh I L Jeh +jei I L Yei +jek I L Jeri Kuo +jel I L Yelmek +jen I L Dza +jer I L Jere +jet I L Manem +jeu I L Jonkor Bourmataguil +jgb I E Ngbee +jge I L Judeo-Georgian +jgk I L Gwak +jgo I L Ngomba +jhi I L Jehai +jhs I L Jhankot Sign Language +jia I L Jina +jib I L Jibu +jic I L Tol +jid I L Bu +jie I L Jilbe +jig I L Djingili +jih I L sTodsde +jii I L Jiiddu +jil I L Jilim +jim I L Jimi (Cameroon) +jio I L Jiamao +jiq I L Guanyinqiao +jit I L Jita +jiu I L Youle Jinuo +jiv I L Shuar +jiy I L Buyuan Jinuo +jjr I L Bankal +jkm I L Mobwa Karen +jko I L Kubo +jkp I L Paku Karen +jkr I L Koro (India) +jku I L Labir +jle I L Ngile +jls I L Jamaican Sign Language +jma I L Dima +jmb I L Zumbun +jmc I L Machame +jmd I L Yamdena +jmi I L Jimi (Nigeria) +jml I L Jumli +jmn I L Makuri Naga +jmr I L Kamara +jms I L Mashi (Nigeria) +jmw I L Mouwase +jmx I L Western Juxtlahuaca Mixtec +jna I L Jangshung +jnd I L Jandavra +jng I E Yangman +jni I L Janji +jnj I L Yemsa +jnl I L Rawat +jns I L Jaunsari +job I L Joba +jod I L Wojenaka +jor I E Jorá +jos I L Jordanian Sign Language +jow I L Jowulu +jpa I H Jewish Palestinian Aramaic +jpn jpn jpn ja I L Japanese +jpr jpr jpr I L Judeo-Persian +jqr I L Jaqaru +jra I L Jarai +jrb jrb jrb M L Judeo-Arabic +jrr I L Jiru +jrt I L Jorto +jru I L Japrería +jsl I L Japanese Sign Language +jua I L Júma +jub I L Wannu +juc I E Jurchen +jud I L Worodougou +juh I L Hõne +jui I E Ngadjuri +juk I L Wapan +jul I L Jirel +jum I L Jumjum +jun I L Juang +juo I L Jiba +jup I L Hupdë +jur I L Jurúna +jus I L Jumla Sign Language 
+jut I L Jutish +juu I L Ju +juw I L Wãpha +juy I L Juray +jvd I E Javindo +jvn I L Caribbean Javanese +jwi I L Jwira-Pepesa +jya I L Jiarong +jye I L Judeo-Yemeni Arabic +jyy I L Jaya +kaa kaa kaa I L Kara-Kalpak +kab kab kab I L Kabyle +kac kac kac I L Kachin +kad I L Adara +kae I E Ketangalan +kaf I L Katso +kag I L Kajaman +kah I L Kara (Central African Republic) +kai I L Karekare +kaj I L Jju +kak I L Kayapa Kallahan +kal kal kal kl I L Kalaallisut +kam kam kam I L Kamba (Kenya) +kan kan kan kn I L Kannada +kao I L Xaasongaxango +kap I L Bezhta +kaq I L Capanahua +kas kas kas ks I L Kashmiri +kat geo kat ka I L Georgian +kau kau kau kr M L Kanuri +kav I L Katukína +kaw kaw kaw I A Kawi +kax I L Kao +kay I L Kamayurá +kaz kaz kaz kk I L Kazakh +kba I E Kalarko +kbb I E Kaxuiâna +kbc I L Kadiwéu +kbd kbd kbd I L Kabardian +kbe I L Kanju +kbf I E Kakauhua +kbg I L Khamba +kbh I L Camsá +kbi I L Kaptiau +kbj I L Kari +kbk I L Grass Koiari +kbl I L Kanembu +kbm I L Iwal +kbn I L Kare (Central African Republic) +kbo I L Keliko +kbp I L Kabiyè +kbq I L Kamano +kbr I L Kafa +kbs I L Kande +kbt I L Abadi +kbu I L Kabutra +kbv I L Dera (Indonesia) +kbw I L Kaiep +kbx I L Ap Ma +kby I L Manga Kanuri +kbz I L Duhwa +kca I L Khanty +kcb I L Kawacha +kcc I L Lubila +kcd I L Ngkâlmpw Kanum +kce I L Kaivi +kcf I L Ukaan +kcg I L Tyap +kch I L Vono +kci I L Kamantan +kcj I L Kobiana +kck I L Kalanga +kcl I L Kela (Papua New Guinea) +kcm I L Gula (Central African Republic) +kcn I L Nubi +kco I L Kinalakna +kcp I L Kanga +kcq I L Kamo +kcr I L Katla +kcs I L Koenoem +kct I L Kaian +kcu I L Kami (Tanzania) +kcv I L Kete +kcw I L Kabwari +kcx I L Kachama-Ganjule +kcy I L Korandje +kcz I L Konongo +kda I E Worimi +kdc I L Kutu +kdd I L Yankunytjatjara +kde I L Makonde +kdf I L Mamusi +kdg I L Seba +kdh I L Tem +kdi I L Kumam +kdj I L Karamojong +kdk I L Numèè +kdl I L Tsikimba +kdm I L Kagoma +kdn I L Kunda +kdp I L Kaningdon-Nindem +kdq I L Koch +kdr I L Karaim +kdt I L Kuy +kdu I 
L Kadaru +kdw I L Koneraw +kdx I L Kam +kdy I L Keder +kdz I L Kwaja +kea I L Kabuverdianu +keb I L Kélé +kec I L Keiga +ked I L Kerewe +kee I L Eastern Keres +kef I L Kpessi +keg I L Tese +keh I L Keak +kei I L Kei +kej I L Kadar +kek I L Kekchí +kel I L Kela (Democratic Republic of Congo) +kem I L Kemak +ken I L Kenyang +keo I L Kakwa +kep I L Kaikadi +keq I L Kamar +ker I L Kera +kes I L Kugbo +ket I L Ket +keu I L Akebu +kev I L Kanikkaran +kew I L West Kewa +kex I L Kukna +key I L Kupia +kez I L Kukele +kfa I L Kodava +kfb I L Northwestern Kolami +kfc I L Konda-Dora +kfd I L Korra Koraga +kfe I L Kota (India) +kff I L Koya +kfg I L Kudiya +kfh I L Kurichiya +kfi I L Kannada Kurumba +kfj I L Kemiehua +kfk I L Kinnauri +kfl I L Kung +kfm I L Khunsari +kfn I L Kuk +kfo I L Koro (Côte d'Ivoire) +kfp I L Korwa +kfq I L Korku +kfr I L Kachchi +kfs I L Bilaspuri +kft I L Kanjari +kfu I L Katkari +kfv I L Kurmukar +kfw I L Kharam Naga +kfx I L Kullu Pahari +kfy I L Kumaoni +kfz I L Koromfé +kga I L Koyaga +kgb I L Kawe +kgc I L Kasseng +kgd I L Kataang +kge I L Komering +kgf I L Kube +kgg I L Kusunda +kgi I L Selangor Sign Language +kgj I L Gamale Kham +kgk I L Kaiwá +kgl I E Kunggari +kgm I E Karipúna +kgn I L Karingani +kgo I L Krongo +kgp I L Kaingang +kgq I L Kamoro +kgr I L Abun +kgs I L Kumbainggar +kgt I L Somyev +kgu I L Kobol +kgv I L Karas +kgw I L Karon Dori +kgx I L Kamaru +kgy I L Kyerung +kha kha kha I L Khasi +khb I L Lü +khc I L Tukang Besi North +khd I L Bädi Kanum +khe I L Korowai +khf I L Khuen +khg I L Khams Tibetan +khh I L Kehu +khj I L Kuturmi +khk I L Halh Mongolian +khl I L Lusi +khm khm khm km I L Central Khmer +khn I L Khandesi +kho kho kho I A Khotanese +khp I L Kapori +khq I L Koyra Chiini Songhay +khr I L Kharia +khs I L Kasua +kht I L Khamti +khu I L Nkhumbi +khv I L Khvarshi +khw I L Khowar +khx I L Kanu +khy I L Kele (Democratic Republic of Congo) +khz I L Keapara +kia I L Kim +kib I L Koalib +kic I L Kickapoo +kid I L Koshin +kie I L 
Kibet +kif I L Eastern Parbate Kham +kig I L Kimaama +kih I L Kilmeri +kii I E Kitsai +kij I L Kilivila +kik kik kik ki I L Kikuyu +kil I L Kariya +kim I L Karagas +kin kin kin rw I L Kinyarwanda +kio I L Kiowa +kip I L Sheshi Kham +kiq I L Kosadle +kir kir kir ky I L Kirghiz +kis I L Kis +kit I L Agob +kiu I L Kirmanjki (individual language) +kiv I L Kimbu +kiw I L Northeast Kiwai +kix I L Khiamniungan Naga +kiy I L Kirikiri +kiz I L Kisi +kja I L Mlap +kjb I L Q'anjob'al +kjc I L Coastal Konjo +kjd I L Southern Kiwai +kje I L Kisar +kjf I L Khalaj +kjg I L Khmu +kjh I L Khakas +kji I L Zabana +kjj I L Khinalugh +kjk I L Highland Konjo +kjl I L Western Parbate Kham +kjm I L Kháng +kjn I L Kunjen +kjo I L Harijan Kinnauri +kjp I L Pwo Eastern Karen +kjq I L Western Keres +kjr I L Kurudu +kjs I L East Kewa +kjt I L Phrae Pwo Karen +kju I L Kashaya +kjx I L Ramopa +kjy I L Erave +kjz I L Bumthangkha +kka I L Kakanda +kkb I L Kwerisa +kkc I L Odoodee +kkd I L Kinuku +kke I L Kakabe +kkf I L Kalaktang Monpa +kkg I L Mabaka Valley Kalinga +kkh I L Khün +kki I L Kagulu +kkj I L Kako +kkk I L Kokota +kkl I L Kosarek Yale +kkm I L Kiong +kkn I L Kon Keu +kko I L Karko +kkp I L Gugubera +kkq I L Kaiku +kkr I L Kir-Balar +kks I L Giiwo +kkt I L Koi +kku I L Tumi +kkv I L Kangean +kkw I L Teke-Kukuya +kkx I L Kohin +kky I L Guguyimidjir +kkz I L Kaska +kla I E Klamath-Modoc +klb I L Kiliwa +klc I L Kolbila +kld I L Gamilaraay +kle I L Kulung (Nepal) +klf I L Kendeje +klg I L Tagakaulo +klh I L Weliki +kli I L Kalumpang +klj I L Turkic Khalaj +klk I L Kono (Nigeria) +kll I L Kagan Kalagan +klm I L Migum +kln M L Kalenjin +klo I L Kapya +klp I L Kamasa +klq I L Rumu +klr I L Khaling +kls I L Kalasha +klt I L Nukna +klu I L Klao +klv I L Maskelynes +klw I L Lindu +klx I L Koluwawa +kly I L Kalao +klz I L Kabola +kma I L Konni +kmb kmb kmb I L Kimbundu +kmc I L Southern Dong +kmd I L Majukayang Kalinga +kme I L Bakole +kmf I L Kare (Papua New Guinea) +kmg I L Kâte +kmh I L Kalam 
+kmi I L Kami (Nigeria) +kmj I L Kumarbhag Paharia +kmk I L Limos Kalinga +kml I L Tanudan Kalinga +kmm I L Kom (India) +kmn I L Awtuw +kmo I L Kwoma +kmp I L Gimme +kmq I L Kwama +kmr I L Northern Kurdish +kms I L Kamasau +kmt I L Kemtuik +kmu I L Kanite +kmv I L Karipúna Creole French +kmw I L Komo (Democratic Republic of Congo) +kmx I L Waboda +kmy I L Koma +kmz I L Khorasani Turkish +kna I L Dera (Nigeria) +knb I L Lubuagan Kalinga +knc I L Central Kanuri +knd I L Konda +kne I L Kankanaey +knf I L Mankanya +kng I L Koongo +kni I L Kanufi +knj I L Western Kanjobal +knk I L Kuranko +knl I L Keninjal +knm I L Kanamarí +knn I L Konkani (individual language) +kno I L Kono (Sierra Leone) +knp I L Kwanja +knq I L Kintaq +knr I L Kaningra +kns I L Kensiu +knt I L Panoan Katukína +knu I L Kono (Guinea) +knv I L Tabo +knw I L Kung-Ekoka +knx I L Kendayan +kny I L Kanyok +knz I L Kalamsé +koa I L Konomala +koc I E Kpati +kod I L Kodi +koe I L Kacipo-Balesi +kof I E Kubi +kog I L Cogui +koh I L Koyo +koi I L Komi-Permyak +koj I L Sara Dunjo +kok kok kok M L Konkani (macrolanguage) +kol I L Kol (Papua New Guinea) +kom kom kom kv M L Komi +kon kon kon kg M L Kongo +koo I L Konzo +kop I L Waube +koq I L Kota (Gabon) +kor kor kor ko I L Korean +kos kos kos I L Kosraean +kot I L Lagwan +kou I L Koke +kov I L Kudu-Camo +kow I L Kugama +kox I E Coxima +koy I L Koyukon +koz I L Korak +kpa I L Kutto +kpb I L Mullu Kurumba +kpc I L Curripaco +kpd I L Koba +kpe kpe kpe M L Kpelle +kpf I L Komba +kpg I L Kapingamarangi +kph I L Kplang +kpi I L Kofei +kpj I L Karajá +kpk I L Kpan +kpl I L Kpala +kpm I L Koho +kpn I E Kepkiriwát +kpo I L Ikposo +kpq I L Korupun-Sela +kpr I L Korafe-Yegha +kps I L Tehit +kpt I L Karata +kpu I L Kafoa +kpv I L Komi-Zyrian +kpw I L Kobon +kpx I L Mountain Koiali +kpy I L Koryak +kpz I L Kupsabiny +kqa I L Mum +kqb I L Kovai +kqc I L Doromu-Koki +kqd I L Koy Sanjaq Surat +kqe I L Kalagan +kqf I L Kakabai +kqg I L Khe +kqh I L Kisankasa +kqi I L Koitabu +kqj 
I L Koromira +kqk I L Kotafon Gbe +kql I L Kyenele +kqm I L Khisa +kqn I L Kaonde +kqo I L Eastern Krahn +kqp I L Kimré +kqq I L Krenak +kqr I L Kimaragang +kqs I L Northern Kissi +kqt I L Klias River Kadazan +kqu I E Seroa +kqv I L Okolod +kqw I L Kandas +kqx I L Mser +kqy I L Koorete +kqz I E Korana +kra I L Kumhali +krb I E Karkin +krc krc krc I L Karachay-Balkar +krd I L Kairui-Midiki +kre I L Panará +krf I L Koro (Vanuatu) +krh I L Kurama +kri I L Krio +krj I L Kinaray-A +krk I E Kerek +krl krl krl I L Karelian +krm I L Krim +krn I L Sapo +krp I L Korop +krr I L Kru'ng 2 +krs I L Gbaya (Sudan) +krt I L Tumari Kanuri +kru kru kru I L Kurukh +krv I L Kavet +krw I L Western Krahn +krx I L Karon +kry I L Kryts +krz I L Sota Kanum +ksa I L Shuwa-Zamani +ksb I L Shambala +ksc I L Southern Kalinga +ksd I L Kuanua +kse I L Kuni +ksf I L Bafia +ksg I L Kusaghe +ksh I L Kölsch +ksi I L Krisa +ksj I L Uare +ksk I L Kansa +ksl I L Kumalu +ksm I L Kumba +ksn I L Kasiguranin +kso I L Kofa +ksp I L Kaba +ksq I L Kwaami +ksr I L Borong +kss I L Southern Kisi +kst I L Winyé +ksu I L Khamyang +ksv I L Kusu +ksw I L S'gaw Karen +ksx I L Kedang +ksy I L Kharia Thar +ksz I L Kodaku +kta I L Katua +ktb I L Kambaata +ktc I L Kholok +ktd I L Kokata +kte I L Nubri +ktf I L Kwami +ktg I E Kalkutung +kth I L Karanga +kti I L North Muyu +ktj I L Plapo Krumen +ktk I E Kaniet +ktl I L Koroshi +ktm I L Kurti +ktn I L Karitiâna +kto I L Kuot +ktp I L Kaduo +ktq I E Katabaga +ktr I L Kota Marudu Tinagas +kts I L South Muyu +ktt I L Ketum +ktu I L Kituba (Democratic Republic of Congo) +ktv I L Eastern Katu +ktw I E Kato +ktx I L Kaxararí +kty I L Kango (Bas-Uélé District) +ktz I L Ju/'hoan +kua kua kua kj I L Kuanyama +kub I L Kutep +kuc I L Kwinsu +kud I L 'Auhelawa +kue I L Kuman +kuf I L Western Katu +kug I L Kupa +kuh I L Kushi +kui I L Kuikúro-Kalapálo +kuj I L Kuria +kuk I L Kepo' +kul I L Kulere +kum kum kum I L Kumyk +kun I L Kunama +kuo I L Kumukio +kup I L Kunimaipa +kuq I L Karipuna 
+kur kur kur ku M L Kurdish +kus I L Kusaal +kut kut kut I L Kutenai +kuu I L Upper Kuskokwim +kuv I L Kur +kuw I L Kpagua +kux I L Kukatja +kuy I L Kuuku-Ya'u +kuz I E Kunza +kva I L Bagvalal +kvb I L Kubu +kvc I L Kove +kvd I L Kui (Indonesia) +kve I L Kalabakan +kvf I L Kabalai +kvg I L Kuni-Boazi +kvh I L Komodo +kvi I L Kwang +kvj I L Psikye +kvk I L Korean Sign Language +kvl I L Kayaw +kvm I L Kendem +kvn I L Border Kuna +kvo I L Dobel +kvp I L Kompane +kvq I L Geba Karen +kvr I L Kerinci +kvs I L Kunggara +kvt I L Lahta Karen +kvu I L Yinbaw Karen +kvv I L Kola +kvw I L Wersing +kvx I L Parkari Koli +kvy I L Yintale Karen +kvz I L Tsakwambo +kwa I L Dâw +kwb I L Kwa +kwc I L Likwala +kwd I L Kwaio +kwe I L Kwerba +kwf I L Kwara'ae +kwg I L Sara Kaba Deme +kwh I L Kowiai +kwi I L Awa-Cuaiquer +kwj I L Kwanga +kwk I L Kwakiutl +kwl I L Kofyar +kwm I L Kwambi +kwn I L Kwangali +kwo I L Kwomtari +kwp I L Kodia +kwq I L Kwak +kwr I L Kwer +kws I L Kwese +kwt I L Kwesten +kwu I L Kwakum +kwv I L Sara Kaba Náà +kww I L Kwinti +kwx I L Khirwar +kwy I L San Salvador Kongo +kwz I E Kwadi +kxa I L Kairiru +kxb I L Krobu +kxc I L Konso +kxd I L Brunei +kxe I L Kakihum +kxf I L Manumanaw Karen +kxh I L Karo (Ethiopia) +kxi I L Keningau Murut +kxj I L Kulfa +kxk I L Zayein Karen +kxl I L Nepali Kurux +kxm I L Northern Khmer +kxn I L Kanowit-Tanjong Melanau +kxo I E Kanoé +kxp I L Wadiyara Koli +kxq I L Smärky Kanum +kxr I L Koro (Papua New Guinea) +kxs I L Kangjia +kxt I L Koiwat +kxu I L Kui (India) +kxv I L Kuvi +kxw I L Konai +kxx I L Likuba +kxy I L Kayong +kxz I L Kerewo +kya I L Kwaya +kyb I L Butbut Kalinga +kyc I L Kyaka +kyd I L Karey +kye I L Krache +kyf I L Kouya +kyg I L Keyagana +kyh I L Karok +kyi I L Kiput +kyj I L Karao +kyk I L Kamayo +kyl I L Kalapuya +kym I L Kpatili +kyn I L Northern Binukidnon +kyo I L Kelon +kyp I L Kang +kyq I L Kenga +kyr I L Kuruáya +kys I L Baram Kayan +kyt I L Kayagar +kyu I L Western Kayah +kyv I L Kayort +kyw I L Kudmali +kyx 
I L Rapoisi +kyy I L Kambaira +kyz I L Kayabí +kza I L Western Karaboro +kzb I L Kaibobo +kzc I L Bondoukou Kulango +kzd I L Kadai +kze I L Kosena +kzf I L Da'a Kaili +kzg I L Kikai +kzi I L Kelabit +kzj I L Coastal Kadazan +kzk I E Kazukuru +kzl I L Kayeli +kzm I L Kais +kzn I L Kokola +kzo I L Kaningi +kzp I L Kaidipang +kzq I L Kaike +kzr I L Karang +kzs I L Sugut Dusun +kzt I L Tambunan Dusun +kzu I L Kayupulau +kzv I L Komyandaret +kzw I E Karirí-Xocó +kzx I L Kamarian +kzy I L Kango (Tshopo District) +kzz I L Kalabra +laa I L Southern Subanen +lab I A Linear A +lac I L Lacandon +lad lad lad I L Ladino +lae I L Pattani +laf I L Lafofa +lag I L Langi +lah lah lah M L Lahnda +lai I L Lambya +laj I L Lango (Uganda) +lak I L Laka (Nigeria) +lal I L Lalia +lam lam lam I L Lamba +lan I L Laru +lao lao lao lo I L Lao +lap I L Laka (Chad) +laq I L Qabiao +lar I L Larteh +las I L Lama (Togo) +lat lat lat la I A Latin +lau I L Laba +lav lav lav lv M L Latvian +law I L Lauje +lax I L Tiwa +lay I L Lama (Myanmar) +laz I E Aribwatsa +lba I E Lui +lbb I L Label +lbc I L Lakkia +lbe I L Lak +lbf I L Tinani +lbg I L Laopang +lbi I L La'bi +lbj I L Ladakhi +lbk I L Central Bontok +lbl I L Libon Bikol +lbm I L Lodhi +lbn I L Lamet +lbo I L Laven +lbq I L Wampar +lbr I L Lohorung +lbs I L Libyan Sign Language +lbt I L Lachi +lbu I L Labu +lbv I L Lavatbura-Lamusong +lbw I L Tolaki +lbx I L Lawangan +lby I E Lamu-Lamu +lbz I L Lardil +lcc I L Legenyem +lcd I L Lola +lce I L Loncong +lcf I L Lubu +lch I L Luchazi +lcl I L Lisela +lcm I L Tungag +lcp I L Western Lawa +lcq I L Luhu +lcs I L Lisabata-Nuniali +lda I L Kla-Dan +ldb I L Dũya +ldd I L Luri +ldg I L Lenyima +ldh I L Lamja-Dengsa-Tola +ldi I L Laari +ldj I L Lemoro +ldk I L Leelau +ldl I L Kaan +ldm I L Landoma +ldn I C Láadan +ldo I L Loo +ldp I L Tso +ldq I L Lufu +lea I L Lega-Shabunda +leb I L Lala-Bisa +lec I L Leco +led I L Lendu +lee I L Lyélé +lef I L Lelemi +leg I L Lengua +leh I L Lenje +lei I L Lemio +lej I L 
Lengola +lek I L Leipon +lel I L Lele (Democratic Republic of Congo) +lem I L Nomaande +len I E Lenca +leo I L Leti (Cameroon) +lep I L Lepcha +leq I L Lembena +ler I L Lenkau +les I L Lese +let I L Lesing-Gelimi +leu I L Kara (Papua New Guinea) +lev I L Lamma +lew I L Ledo Kaili +lex I L Luang +ley I L Lemolang +lez lez lez I L Lezghian +lfa I L Lefa +lfn I C Lingua Franca Nova +lga I L Lungga +lgb I L Laghu +lgg I L Lugbara +lgh I L Laghuu +lgi I L Lengilu +lgk I L Lingarak +lgl I L Wala +lgm I L Lega-Mwenga +lgn I L Opuuo +lgq I L Logba +lgr I L Lengo +lgt I L Pahi +lgu I L Longgu +lgz I L Ligenza +lha I L Laha (Viet Nam) +lhh I L Laha (Indonesia) +lhi I L Lahu Shi +lhl I L Lahul Lohar +lhm I L Lhomi +lhn I L Lahanan +lhp I L Lhokpu +lhs I E Mlahsö +lht I L Lo-Toga +lhu I L Lahu +lia I L West-Central Limba +lib I L Likum +lic I L Hlai +lid I L Nyindrou +lie I L Likila +lif I L Limbu +lig I L Ligbi +lih I L Lihir +lii I L Lingkhim +lij I L Ligurian +lik I L Lika +lil I L Lillooet +lim lim lim li I L Limburgan +lin lin lin ln I L Lingala +lio I L Liki +lip I L Sekpele +liq I L Libido +lir I L Liberian English +lis I L Lisu +lit lit lit lt I L Lithuanian +liu I L Logorik +liv I L Liv +liw I L Col +lix I L Liabuku +liy I L Banda-Bambari +liz I L Libinza +lja I E Golpa +lje I L Rampi +lji I L Laiyolo +ljl I L Li'o +ljp I L Lampung Api +ljw I L Yirandali +ljx I E Yuru +lka I L Lakalei +lkb I L Kabras +lkc I L Kucong +lkd I L Lakondê +lke I L Kenyi +lkh I L Lakha +lki I L Laki +lkj I L Remun +lkl I L Laeko-Libuat +lkm I E Kalaamaya +lkn I L Lakon +lko I L Khayo +lkr I L Päri +lks I L Kisa +lkt I L Lakota +lku I E Kungkari +lky I L Lokoya +lla I L Lala-Roba +llb I L Lolo +llc I L Lele (Guinea) +lld I L Ladin +lle I L Lele (Papua New Guinea) +llf I E Hermit +llg I L Lole +llh I L Lamu +lli I L Teke-Laali +llj I E Ladji Ladji +llk I E Lelak +lll I L Lilau +llm I L Lasalimu +lln I L Lele (Chad) +llo I L Khlor +llp I L North Efate +llq I L Lolak +lls I L Lithuanian Sign 
Language +llu I L Lau +llx I L Lauan +lma I L East Limba +lmb I L Merei +lmc I E Limilngan +lmd I L Lumun +lme I L Pévé +lmf I L South Lembata +lmg I L Lamogai +lmh I L Lambichhong +lmi I L Lombi +lmj I L West Lembata +lmk I L Lamkang +lml I L Hano +lmm I L Lamam +lmn I L Lambadi +lmo I L Lombard +lmp I L Limbum +lmq I L Lamatuka +lmr I L Lamalera +lmu I L Lamenu +lmv I L Lomaiviti +lmw I L Lake Miwok +lmx I L Laimbue +lmy I L Lamboya +lmz I E Lumbee +lna I L Langbashe +lnb I L Mbalanhu +lnd I L Lundayeh +lng I A Langobardic +lnh I L Lanoh +lni I L Daantanai' +lnj I E Leningitij +lnl I L South Central Banda +lnm I L Langam +lnn I L Lorediakarkar +lno I L Lango (Sudan) +lns I L Lamnso' +lnu I L Longuda +lnw I E Lanima +lnz I L Lonzo +loa I L Loloda +lob I L Lobi +loc I L Inonhan +loe I L Saluan +lof I L Logol +log I L Logo +loh I L Narim +loi I L Loma (Côte d'Ivoire) +loj I L Lou +lok I L Loko +lol lol lol I L Mongo +lom I L Loma (Liberia) +lon I L Malawi Lomwe +loo I L Lombo +lop I L Lopa +loq I L Lobala +lor I L Téén +los I L Loniu +lot I L Otuho +lou I L Louisiana Creole French +lov I L Lopi +low I L Tampias Lobu +lox I L Loun +loy I L Loke +loz loz loz I L Lozi +lpa I L Lelepa +lpe I L Lepki +lpn I L Long Phuri Naga +lpo I L Lipo +lpx I L Lopit +lra I L Rara Bakati' +lrc I L Northern Luri +lre I E Laurentian +lrg I E Laragia +lri I L Marachi +lrk I L Loarki +lrl I L Lari +lrm I L Marama +lrn I L Lorang +lro I L Laro +lrr I L Southern Yamphu +lrt I L Larantuka Malay +lrv I L Larevat +lrz I L Lemerig +lsa I L Lasgerdi +lsd I L Lishana Deni +lse I L Lusengo +lsg I L Lyons Sign Language +lsh I L Lish +lsi I L Lashi +lsl I L Latvian Sign Language +lsm I L Saamia +lso I L Laos Sign Language +lsp I L Panamanian Sign Language +lsr I L Aruop +lss I L Lasi +lst I L Trinidad and Tobago Sign Language +lsy I L Mauritian Sign Language +ltc I H Late Middle Chinese +ltg I L Latgalian +lti I L Leti (Indonesia) +ltn I L Latundê +lto I L Tsotso +lts I L Tachoni +ltu I L Latu +ltz 
ltz ltz lb I L Luxembourgish +lua lua lua I L Luba-Lulua +lub lub lub lu I L Luba-Katanga +luc I L Aringa +lud I L Ludian +lue I L Luvale +luf I L Laua +lug lug lug lg I L Ganda +lui lui lui I L Luiseno +luj I L Luna +luk I L Lunanakha +lul I L Olu'bo +lum I L Luimbi +lun lun lun I L Lunda +luo luo luo I L Luo (Kenya and Tanzania) +lup I L Lumbu +luq I L Lucumi +lur I L Laura +lus lus lus I L Lushai +lut I L Lushootseed +luu I L Lumba-Yakkha +luv I L Luwati +luw I L Luo (Cameroon) +luy M L Luyia +luz I L Southern Luri +lva I L Maku'a +lvk I L Lavukaleve +lvs I L Standard Latvian +lvu I L Levuka +lwa I L Lwalu +lwe I L Lewo Eleng +lwg I L Wanga +lwh I L White Lachi +lwl I L Eastern Lawa +lwm I L Laomian +lwo I L Luwo +lwt I L Lewotobi +lwu I L Lawu +lww I L Lewo +lya I L Layakha +lyg I L Lyngngam +lyn I L Luyana +lzh I H Literary Chinese +lzl I L Litzlitz +lzn I L Leinong Naga +lzz I L Laz +maa I L San Jerónimo Tecóatl Mazatec +mab I L Yutanduchi Mixtec +mad mad mad I L Madurese +mae I L Bo-Rukul +maf I L Mafa +mag mag mag I L Magahi +mah mah mah mh I L Marshallese +mai mai mai I L Maithili +maj I L Jalapa De Díaz Mazatec +mak mak mak I L Makasar +mal mal mal ml I L Malayalam +mam I L Mam +man man man M L Mandingo +maq I L Chiquihuitlán Mazatec +mar mar mar mr I L Marathi +mas mas mas I L Masai +mat I L San Francisco Matlatzinca +mau I L Huautla Mazatec +mav I L Sateré-Mawé +maw I L Mampruli +max I L North Moluccan Malay +maz I L Central Mazahua +mba I L Higaonon +mbb I L Western Bukidnon Manobo +mbc I L Macushi +mbd I L Dibabawon Manobo +mbe I E Molale +mbf I L Baba Malay +mbh I L Mangseng +mbi I L Ilianen Manobo +mbj I L Nadëb +mbk I L Malol +mbl I L Maxakalí +mbm I L Ombamba +mbn I L Macaguán +mbo I L Mbo (Cameroon) +mbp I L Malayo +mbq I L Maisin +mbr I L Nukak Makú +mbs I L Sarangani Manobo +mbt I L Matigsalug Manobo +mbu I L Mbula-Bwazza +mbv I L Mbulungish +mbw I L Maring +mbx I L Mari (East Sepik Province) +mby I L Memoni +mbz I L Amoltepec Mixtec +mca I L 
Maca +mcb I L Machiguenga +mcc I L Bitur +mcd I L Sharanahua +mce I L Itundujia Mixtec +mcf I L Matsés +mcg I L Mapoyo +mch I L Maquiritari +mci I L Mese +mcj I L Mvanip +mck I L Mbunda +mcl I E Macaguaje +mcm I L Malaccan Creole Portuguese +mcn I L Masana +mco I L Coatlán Mixe +mcp I L Makaa +mcq I L Ese +mcr I L Menya +mcs I L Mambai +mct I L Mengisa +mcu I L Cameroon Mambila +mcv I L Minanibai +mcw I L Mawa (Chad) +mcx I L Mpiemo +mcy I L South Watut +mcz I L Mawan +mda I L Mada (Nigeria) +mdb I L Morigi +mdc I L Male (Papua New Guinea) +mdd I L Mbum +mde I L Maba (Chad) +mdf mdf mdf I L Moksha +mdg I L Massalat +mdh I L Maguindanaon +mdi I L Mamvu +mdj I L Mangbetu +mdk I L Mangbutu +mdl I L Maltese Sign Language +mdm I L Mayogo +mdn I L Mbati +mdp I L Mbala +mdq I L Mbole +mdr mdr mdr I L Mandar +mds I L Maria (Papua New Guinea) +mdt I L Mbere +mdu I L Mboko +mdv I L Santa Lucía Monteverde Mixtec +mdw I L Mbosi +mdx I L Dizin +mdy I L Male (Ethiopia) +mdz I L Suruí Do Pará +mea I L Menka +meb I L Ikobi +mec I L Mara +med I L Melpa +mee I L Mengen +mef I L Megam +meh I L Southwestern Tlaxiaco Mixtec +mei I L Midob +mej I L Meyah +mek I L Mekeo +mel I L Central Melanau +mem I E Mangala +men men men I L Mende (Sierra Leone) +meo I L Kedah Malay +mep I L Miriwung +meq I L Merey +mer I L Meru +mes I L Masmaje +met I L Mato +meu I L Motu +mev I L Mano +mew I L Maaka +mey I L Hassaniyya +mez I L Menominee +mfa I L Pattani Malay +mfb I L Bangka +mfc I L Mba +mfd I L Mendankwe-Nkwen +mfe I L Morisyen +mff I L Naki +mfg I L Mogofin +mfh I L Matal +mfi I L Wandala +mfj I L Mefele +mfk I L North Mofu +mfl I L Putai +mfm I L Marghi South +mfn I L Cross River Mbembe +mfo I L Mbe +mfp I L Makassar Malay +mfq I L Moba +mfr I L Marithiel +mfs I L Mexican Sign Language +mft I L Mokerang +mfu I L Mbwela +mfv I L Mandjak +mfw I E Mulaha +mfx I L Melo +mfy I L Mayo +mfz I L Mabaan +mga mga mga I H Middle Irish (900-1200) +mgb I L Mararit +mgc I L Morokodo +mgd I L Moru +mge I L 
Mango +mgf I L Maklew +mgg I L Mpumpong +mgh I L Makhuwa-Meetto +mgi I L Lijili +mgj I L Abureni +mgk I L Mawes +mgl I L Maleu-Kilenge +mgm I L Mambae +mgn I L Mbangi +mgo I L Meta' +mgp I L Eastern Magar +mgq I L Malila +mgr I L Mambwe-Lungu +mgs I L Manda (Tanzania) +mgt I L Mongol +mgu I L Mailu +mgv I L Matengo +mgw I L Matumbi +mgy I L Mbunga +mgz I L Mbugwe +mha I L Manda (India) +mhb I L Mahongwe +mhc I L Mocho +mhd I L Mbugu +mhe I L Besisi +mhf I L Mamaa +mhg I L Margu +mhh I L Maskoy Pidgin +mhi I L Ma'di +mhj I L Mogholi +mhk I L Mungaka +mhl I L Mauwake +mhm I L Makhuwa-Moniga +mhn I L Mócheno +mho I L Mashi (Zambia) +mhp I L Balinese Malay +mhq I L Mandan +mhr I L Eastern Mari +mhs I L Buru (Indonesia) +mht I L Mandahuaca +mhu I L Digaro-Mishmi +mhw I L Mbukushu +mhx I L Maru +mhy I L Ma'anyan +mhz I L Mor (Mor Islands) +mia I L Miami +mib I L Atatláhuca Mixtec +mic mic mic I L Mi'kmaq +mid I L Mandaic +mie I L Ocotepec Mixtec +mif I L Mofu-Gudur +mig I L San Miguel El Grande Mixtec +mih I L Chayuco Mixtec +mii I L Chigmecatitlán Mixtec +mij I L Abar +mik I L Mikasuki +mil I L Peñoles Mixtec +mim I L Alacatlatzala Mixtec +min min min I L Minangkabau +mio I L Pinotepa Nacional Mixtec +mip I L Apasco-Apoala Mixtec +miq I L Mískito +mir I L Isthmus Mixe +mis mis mis S S Uncoded languages +mit I L Southern Puebla Mixtec +miu I L Cacaloxtepec Mixtec +miw I L Akoye +mix I L Mixtepec Mixtec +miy I L Ayutla Mixtec +miz I L Coatzospan Mixtec +mjc I L San Juan Colorado Mixtec +mjd I L Northwest Maidu +mje I E Muskum +mjg I L Tu +mjh I L Mwera (Nyasa) +mji I L Kim Mun +mjj I L Mawak +mjk I L Matukar +mjl I L Mandeali +mjm I L Medebur +mjn I L Ma (Papua New Guinea) +mjo I L Malankuravan +mjp I L Malapandaram +mjq I E Malaryan +mjr I L Malavedan +mjs I L Miship +mjt I L Sauria Paharia +mju I L Manna-Dora +mjv I L Mannan +mjw I L Karbi +mjx I L Mahali +mjy I E Mahican +mjz I L Majhi +mka I L Mbre +mkb I L Mal Paharia +mkc I L Siliput +mkd mac mkd mk I L Macedonian 
+mke I L Mawchi +mkf I L Miya +mkg I L Mak (China) +mki I L Dhatki +mkj I L Mokilese +mkk I L Byep +mkl I L Mokole +mkm I L Moklen +mkn I L Kupang Malay +mko I L Mingang Doso +mkp I L Moikodi +mkq I E Bay Miwok +mkr I L Malas +mks I L Silacayoapan Mixtec +mkt I L Vamale +mku I L Konyanka Maninka +mkv I L Mafea +mkw I L Kituba (Congo) +mkx I L Kinamiging Manobo +mky I L East Makian +mkz I L Makasae +mla I L Malo +mlb I L Mbule +mlc I L Cao Lan +mle I L Manambu +mlf I L Mal +mlg mlg mlg mg M L Malagasy +mlh I L Mape +mli I L Malimpung +mlj I L Miltu +mlk I L Ilwana +mll I L Malua Bay +mlm I L Mulam +mln I L Malango +mlo I L Mlomp +mlp I L Bargam +mlq I L Western Maninkakan +mlr I L Vame +mls I L Masalit +mlt mlt mlt mt I L Maltese +mlu I L To'abaita +mlv I L Motlav +mlw I L Moloko +mlx I L Malfaxal +mlz I L Malaynon +mma I L Mama +mmb I L Momina +mmc I L Michoacán Mazahua +mmd I L Maonan +mme I L Mae +mmf I L Mundat +mmg I L North Ambrym +mmh I L Mehináku +mmi I L Musar +mmj I L Majhwar +mmk I L Mukha-Dora +mml I L Man Met +mmm I L Maii +mmn I L Mamanwa +mmo I L Mangga Buang +mmp I L Siawi +mmq I L Musak +mmr I L Western Xiangxi Miao +mmt I L Malalamai +mmu I L Mmaala +mmv I E Miriti +mmw I L Emae +mmx I L Madak +mmy I L Migaama +mmz I L Mabaale +mna I L Mbula +mnb I L Muna +mnc mnc mnc I L Manchu +mnd I L Mondé +mne I L Naba +mnf I L Mundani +mng I L Eastern Mnong +mnh I L Mono (Democratic Republic of Congo) +mni mni mni I L Manipuri +mnj I L Munji +mnk I L Mandinka +mnl I L Tiale +mnm I L Mapena +mnn I L Southern Mnong +mnp I L Min Bei Chinese +mnq I L Minriq +mnr I L Mono (USA) +mns I L Mansi +mnu I L Mer +mnv I L Rennell-Bellona +mnw I L Mon +mnx I L Manikion +mny I L Manyawa +mnz I L Moni +moa I L Mwan +moc I L Mocoví +mod I E Mobilian +moe I L Montagnais +mog I L Mongondow +moh moh moh I L Mohawk +moi I L Mboi +moj I L Monzombo +mok I L Morori +mom I E Mangue +mon mon mon mn M L Mongolian +moo I L Monom +mop I L Mopán Maya +moq I L Mor (Bomberai Peninsula) +mor 
I L Moro +mos mos mos I L Mossi +mot I L Barí +mou I L Mogum +mov I L Mohave +mow I L Moi (Congo) +mox I L Molima +moy I L Shekkacho +moz I L Mukulu +mpa I L Mpoto +mpb I L Mullukmulluk +mpc I L Mangarayi +mpd I L Machinere +mpe I L Majang +mpg I L Marba +mph I L Maung +mpi I L Mpade +mpj I L Martu Wangka +mpk I L Mbara (Chad) +mpl I L Middle Watut +mpm I L Yosondúa Mixtec +mpn I L Mindiri +mpo I L Miu +mpp I L Migabac +mpq I L Matís +mpr I L Vangunu +mps I L Dadibi +mpt I L Mian +mpu I L Makuráp +mpv I L Mungkip +mpw I L Mapidian +mpx I L Misima-Panaeati +mpy I L Mapia +mpz I L Mpi +mqa I L Maba (Indonesia) +mqb I L Mbuko +mqc I L Mangole +mqe I L Matepi +mqf I L Momuna +mqg I L Kota Bangun Kutai Malay +mqh I L Tlazoyaltepec Mixtec +mqi I L Mariri +mqj I L Mamasa +mqk I L Rajah Kabunsuwan Manobo +mql I L Mbelime +mqm I L South Marquesan +mqn I L Moronene +mqo I L Modole +mqp I L Manipa +mqq I L Minokok +mqr I L Mander +mqs I L West Makian +mqt I L Mok +mqu I L Mandari +mqv I L Mosimo +mqw I L Murupi +mqx I L Mamuju +mqy I L Manggarai +mqz I L Pano +mra I L Mlabri +mrb I L Marino +mrc I L Maricopa +mrd I L Western Magar +mre I E Martha's Vineyard Sign Language +mrf I L Elseng +mrg I L Mising +mrh I L Mara Chin +mri mao mri mi I L Maori +mrj I L Western Mari +mrk I L Hmwaveke +mrl I L Mortlockese +mrm I L Merlav +mrn I L Cheke Holo +mro I L Mru +mrp I L Morouas +mrq I L North Marquesan +mrr I L Maria (India) +mrs I L Maragus +mrt I L Marghi Central +mru I L Mono (Cameroon) +mrv I L Mangareva +mrw I L Maranao +mrx I L Maremgi +mry I L Mandaya +mrz I L Marind +msa may msa ms M L Malay (macrolanguage) +msb I L Masbatenyo +msc I L Sankaran Maninka +msd I L Yucatec Maya Sign Language +mse I L Musey +msf I L Mekwei +msg I L Moraid +msh I L Masikoro Malagasy +msi I L Sabah Malay +msj I L Ma (Democratic Republic of Congo) +msk I L Mansaka +msl I L Molof +msm I L Agusan Manobo +msn I L Vurës +mso I L Mombum +msp I E Maritsauá +msq I L Caac +msr I L Mongolian Sign Language 
+mss I L West Masela +msu I L Musom +msv I L Maslam +msw I L Mansoanka +msx I L Moresada +msy I L Aruamu +msz I L Momare +mta I L Cotabato Manobo +mtb I L Anyin Morofo +mtc I L Munit +mtd I L Mualang +mte I L Mono (Solomon Islands) +mtf I L Murik (Papua New Guinea) +mtg I L Una +mth I L Munggui +mti I L Maiwa (Papua New Guinea) +mtj I L Moskona +mtk I L Mbe' +mtl I L Montol +mtm I E Mator +mtn I E Matagalpa +mto I L Totontepec Mixe +mtp I L Wichí Lhamtés Nocten +mtq I L Muong +mtr I L Mewari +mts I L Yora +mtt I L Mota +mtu I L Tututepec Mixtec +mtv I L Asaro'o +mtw I L Southern Binukidnon +mtx I L Tidaá Mixtec +mty I L Nabi +mua I L Mundang +mub I L Mubi +muc I L Ajumbu +mud I L Mednyj Aleut +mue I L Media Lengua +mug I L Musgu +muh I L Mündü +mui I L Musi +muj I L Mabire +muk I L Mugom +mul mul mul S S Multiple languages +mum I L Maiwala +muo I L Nyong +mup I L Malvi +muq I L Eastern Xiangxi Miao +mur I L Murle +mus mus mus I L Creek +mut I L Western Muria +muu I L Yaaku +muv I L Muthuvan +mux I L Bo-Ung +muy I L Muyang +muz I L Mursi +mva I L Manam +mvb I E Mattole +mvd I L Mamboru +mve I L Marwari (Pakistan) +mvf I L Peripheral Mongolian +mvg I L Yucuañe Mixtec +mvh I L Mulgi +mvi I L Miyako +mvk I L Mekmek +mvl I E Mbara (Australia) +mvm I L Muya +mvn I L Minaveha +mvo I L Marovo +mvp I L Duri +mvq I L Moere +mvr I L Marau +mvs I L Massep +mvt I L Mpotovoro +mvu I L Marfa +mvv I L Tagal Murut +mvw I L Machinga +mvx I L Meoswar +mvy I L Indus Kohistani +mvz I L Mesqan +mwa I L Mwatebu +mwb I L Juwal +mwc I L Are +mwe I L Mwera (Chimwera) +mwf I L Murrinh-Patha +mwg I L Aiklep +mwh I L Mouk-Aria +mwi I L Labo +mwj I L Maligo +mwk I L Kita Maninkakan +mwl mwl mwl I L Mirandese +mwm I L Sar +mwn I L Nyamwanga +mwo I L Central Maewo +mwp I L Kala Lagaw Ya +mwq I L Mün Chin +mwr mwr mwr M L Marwari +mws I L Mwimbi-Muthambi +mwt I L Moken +mwu I E Mittu +mwv I L Mentawai +mww I L Hmong Daw +mwx I L Mediak +mwy I L Mosiro +mwz I L Moingi +mxa I L Northwest Oaxaca 
Mixtec +mxb I L Tezoatlán Mixtec +mxc I L Manyika +mxd I L Modang +mxe I L Mele-Fila +mxf I L Malgbe +mxg I L Mbangala +mxh I L Mvuba +mxi I E Mozarabic +mxj I L Miju-Mishmi +mxk I L Monumbo +mxl I L Maxi Gbe +mxm I L Meramera +mxn I L Moi (Indonesia) +mxo I L Mbowe +mxp I L Tlahuitoltepec Mixe +mxq I L Juquila Mixe +mxr I L Murik (Malaysia) +mxs I L Huitepec Mixtec +mxt I L Jamiltepec Mixtec +mxu I L Mada (Cameroon) +mxv I L Metlatónoc Mixtec +mxw I L Namo +mxx I L Mahou +mxy I L Southeastern Nochixtlán Mixtec +mxz I L Central Masela +mya bur mya my I L Burmese +myb I L Mbay +myc I L Mayeka +myd I L Maramba +mye I L Myene +myf I L Bambassi +myg I L Manta +myh I L Makah +myi I L Mina (India) +myj I L Mangayat +myk I L Mamara Senoufo +myl I L Moma +mym I L Me'en +myo I L Anfillo +myp I L Pirahã +myr I L Muniche +mys I E Mesmes +myu I L Mundurukú +myv myv myv I L Erzya +myw I L Muyuw +myx I L Masaaba +myy I L Macuna +myz I H Classical Mandaic +mza I L Santa María Zacatepec Mixtec +mzb I L Tumzabt +mzc I L Madagascar Sign Language +mzd I L Malimba +mze I L Morawa +mzg I L Monastic Sign Language +mzh I L Wichí Lhamtés Güisnay +mzi I L Ixcatlán Mazatec +mzj I L Manya +mzk I L Nigeria Mambila +mzl I L Mazatlán Mixe +mzm I L Mumuye +mzn I L Mazanderani +mzo I E Matipuhy +mzp I L Movima +mzq I L Mori Atas +mzr I L Marúbo +mzs I L Macanese +mzt I L Mintil +mzu I L Inapang +mzv I L Manza +mzw I L Deg +mzx I L Mawayana +mzy I L Mozambican Sign Language +mzz I L Maiadomu +naa I L Namla +nab I L Southern Nambikuára +nac I L Narak +nad I L Nijadali +nae I L Naka'ela +naf I L Nabak +nag I L Naga Pidgin +naj I L Nalu +nak I L Nakanai +nal I L Nalik +nam I L Ngan'gityemerri +nan I L Min Nan Chinese +nao I L Naaba +nap nap nap I L Neapolitan +naq I L Nama (Namibia) +nar I L Iguta +nas I L Naasioi +nat I L Hungworo +nau nau nau na I L Nauru +nav nav nav nv I L Navajo +naw I L Nawuri +nax I L Nakwi +nay I E Narrinyeri +naz I L Coatepec Nahuatl +nba I L Nyemba +nbb I L Ndoe +nbc I L 
Chang Naga +nbd I L Ngbinda +nbe I L Konyak Naga +nbg I L Nagarchal +nbh I L Ngamo +nbi I L Mao Naga +nbj I L Ngarinman +nbk I L Nake +nbl nbl nbl nr I L South Ndebele +nbm I L Ngbaka Ma'bo +nbn I L Kuri +nbo I L Nkukoli +nbp I L Nnam +nbq I L Nggem +nbr I L Numana-Nunku-Gbantu-Numbu +nbs I L Namibian Sign Language +nbt I L Na +nbu I L Rongmei Naga +nbv I L Ngamambo +nbw I L Southern Ngbandi +nby I L Ningera +nca I L Iyo +ncb I L Central Nicobarese +ncc I L Ponam +ncd I L Nachering +nce I L Yale +ncf I L Notsi +ncg I L Nisga'a +nch I L Central Huasteca Nahuatl +nci I H Classical Nahuatl +ncj I L Northern Puebla Nahuatl +nck I L Nakara +ncl I L Michoacán Nahuatl +ncm I L Nambo +ncn I L Nauna +nco I L Sibe +ncp I L Ndaktup +ncr I L Ncane +ncs I L Nicaraguan Sign Language +nct I L Chothe Naga +ncu I L Chumburung +ncx I L Central Puebla Nahuatl +ncz I E Natchez +nda I L Ndasa +ndb I L Kenswei Nsei +ndc I L Ndau +ndd I L Nde-Nsele-Nta +nde nde nde nd I L North Ndebele +ndf I E Nadruvian +ndg I L Ndengereko +ndh I L Ndali +ndi I L Samba Leko +ndj I L Ndamba +ndk I L Ndaka +ndl I L Ndolo +ndm I L Ndam +ndn I L Ngundi +ndo ndo ndo ng I L Ndonga +ndp I L Ndo +ndq I L Ndombe +ndr I L Ndoola +nds nds nds I L Low German +ndt I L Ndunga +ndu I L Dugun +ndv I L Ndut +ndw I L Ndobo +ndx I L Nduga +ndy I L Lutos +ndz I L Ndogo +nea I L Eastern Ngad'a +neb I L Toura (Côte d'Ivoire) +nec I L Nedebang +ned I L Nde-Gbite +nee I L Nêlêmwa-Nixumwak +nef I L Nefamese +neg I L Negidal +neh I L Nyenkha +nei I A Neo-Hittite +nej I L Neko +nek I L Neku +nem I L Nemi +nen I L Nengone +neo I L Ná-Meo +nep nep nep ne M L Nepali (macrolanguage) +neq I L North Central Mixe +ner I L Yahadian +nes I L Bhoti Kinnauri +net I L Nete +neu I C Neo +nev I L Nyaheun +new new new I L Newari +nex I L Neme +ney I L Neyo +nez I L Nez Perce +nfa I L Dhao +nfd I L Ahwai +nfl I L Ayiwo +nfr I L Nafaanra +nfu I L Mfumte +nga I L Ngbaka +ngb I L Northern Ngbandi +ngc I L Ngombe (Democratic Republic of Congo) +ngd 
I L Ngando (Central African Republic) +nge I L Ngemba +ngg I L Ngbaka Manza +ngh I L N/u +ngi I L Ngizim +ngj I L Ngie +ngk I L Dalabon +ngl I L Lomwe +ngm I L Ngatik Men's Creole +ngn I L Ngwo +ngo I L Ngoni +ngp I L Ngulu +ngq I L Ngurimi +ngr I L Engdewu +ngs I L Gvoko +ngt I L Ngeq +ngu I L Guerrero Nahuatl +ngv I E Nagumi +ngw I L Ngwaba +ngx I L Nggwahyi +ngy I L Tibea +ngz I L Ngungwel +nha I L Nhanda +nhb I L Beng +nhc I E Tabasco Nahuatl +nhd I L Chiripá +nhe I L Eastern Huasteca Nahuatl +nhf I L Nhuwala +nhg I L Tetelcingo Nahuatl +nhh I L Nahari +nhi I L Zacatlán-Ahuacatlán-Tepetzintla Nahuatl +nhk I L Isthmus-Cosoleacaque Nahuatl +nhm I L Morelos Nahuatl +nhn I L Central Nahuatl +nho I L Takuu +nhp I L Isthmus-Pajapan Nahuatl +nhq I L Huaxcaleca Nahuatl +nhr I L Naro +nht I L Ometepec Nahuatl +nhu I L Noone +nhv I L Temascaltepec Nahuatl +nhw I L Western Huasteca Nahuatl +nhx I L Isthmus-Mecayapan Nahuatl +nhy I L Northern Oaxaca Nahuatl +nhz I L Santa María La Alta Nahuatl +nia nia nia I L Nias +nib I L Nakame +nid I E Ngandi +nie I L Niellim +nif I L Nek +nig I E Ngalakan +nih I L Nyiha (Tanzania) +nii I L Nii +nij I L Ngaju +nik I L Southern Nicobarese +nil I L Nila +nim I L Nilamba +nin I L Ninzo +nio I L Nganasan +niq I L Nandi +nir I L Nimboran +nis I L Nimi +nit I L Southeastern Kolami +niu niu niu I L Niuean +niv I L Gilyak +niw I L Nimo +nix I L Hema +niy I L Ngiti +niz I L Ningil +nja I L Nzanyi +njb I L Nocte Naga +njd I L Ndonde Hamba +njh I L Lotha Naga +nji I L Gudanji +njj I L Njen +njl I L Njalgulgule +njm I L Angami Naga +njn I L Liangmai Naga +njo I L Ao Naga +njr I L Njerep +njs I L Nisa +njt I L Ndyuka-Trio Pidgin +nju I L Ngadjunmaya +njx I L Kunyi +njy I L Njyem +njz I L Nyishi +nka I L Nkoya +nkb I L Khoibu Naga +nkc I L Nkongho +nkd I L Koireng +nke I L Duke +nkf I L Inpui Naga +nkg I L Nekgini +nkh I L Khezha Naga +nki I L Thangal Naga +nkj I L Nakai +nkk I L Nokuku +nkm I L Namat +nkn I L Nkangala +nko I L Nkonya +nkp I E 
Niuatoputapu +nkq I L Nkami +nkr I L Nukuoro +nks I L North Asmat +nkt I L Nyika (Tanzania) +nku I L Bouna Kulango +nkv I L Nyika (Malawi and Zambia) +nkw I L Nkutu +nkx I L Nkoroo +nkz I L Nkari +nla I L Ngombale +nlc I L Nalca +nld dut nld nl I L Dutch +nle I L East Nyala +nlg I L Gela +nli I L Grangali +nlj I L Nyali +nlk I L Ninia Yali +nll I L Nihali +nlo I L Ngul +nlq I L Lao Naga +nlu I L Nchumbulu +nlv I L Orizaba Nahuatl +nlw I E Walangama +nlx I L Nahali +nly I L Nyamal +nlz I L Nalögo +nma I L Maram Naga +nmb I L Big Nambas +nmc I L Ngam +nmd I L Ndumu +nme I L Mzieme Naga +nmf I L Tangkhul Naga (India) +nmg I L Kwasio +nmh I L Monsang Naga +nmi I L Nyam +nmj I L Ngombe (Central African Republic) +nmk I L Namakura +nml I L Ndemli +nmm I L Manangba +nmn I L !Xóõ +nmo I L Moyon Naga +nmp I E Nimanbur +nmq I L Nambya +nmr I E Nimbari +nms I L Letemboi +nmt I L Namonuito +nmu I L Northeast Maidu +nmv I E Ngamini +nmw I L Nimoa +nmx I L Nama (Papua New Guinea) +nmy I L Namuyi +nmz I L Nawdm +nna I L Nyangumarta +nnb I L Nande +nnc I L Nancere +nnd I L West Ambae +nne I L Ngandyera +nnf I L Ngaing +nng I L Maring Naga +nnh I L Ngiemboon +nni I L North Nuaulu +nnj I L Nyangatom +nnk I L Nankina +nnl I L Northern Rengma Naga +nnm I L Namia +nnn I L Ngete +nno nno nno nn I L Norwegian Nynorsk +nnp I L Wancho Naga +nnq I L Ngindo +nnr I E Narungga +nns I L Ningye +nnt I E Nanticoke +nnu I L Dwang +nnv I E Nugunu (Australia) +nnw I L Southern Nuni +nnx I L Ngong +nny I E Nyangga +nnz I L Nda'nda' +noa I L Woun Meu +nob nob nob nb I L Norwegian Bokmål +noc I L Nuk +nod I L Northern Thai +noe I L Nimadi +nof I L Nomane +nog nog nog I L Nogai +noh I L Nomu +noi I L Noiri +noj I L Nonuya +nok I E Nooksack +nol I E Nomlaki +nom I E Nocamán +non non non I H Old Norse +nop I L Numanggang +noq I L Ngongo +nor nor nor no M L Norwegian +nos I L Eastern Nisu +not I L Nomatsiguenga +nou I L Ewage-Notu +nov I C Novial +now I L Nyambo +noy I L Noy +noz I L Nayi +npa I L Nar Phu 
+npb I L Nupbikha +npg I L Ponyo-Gongwang Naga +nph I L Phom Naga +npi I L Nepali (individual language) +npl I L Southeastern Puebla Nahuatl +npn I L Mondropolon +npo I L Pochuri Naga +nps I L Nipsan +npu I L Puimei Naga +npy I L Napu +nqg I L Southern Nago +nqk I L Kura Ede Nago +nqm I L Ndom +nqn I L Nen +nqo nqo nqo I L N'Ko +nqq I L Kyan-Karyaw Naga +nqy I L Akyaung Ari Naga +nra I L Ngom +nrb I L Nara +nrc I A Noric +nre I L Southern Rengma Naga +nrg I L Narango +nri I L Chokri Naga +nrk I L Ngarla +nrl I L Ngarluma +nrm I L Narom +nrn I E Norn +nrp I A North Picene +nrr I E Norra +nrt I E Northern Kalapuya +nru I L Narua +nrx I E Ngurmbur +nrz I L Lala +nsa I L Sangtam Naga +nsc I L Nshi +nsd I L Southern Nisu +nse I L Nsenga +nsf I L Northwestern Nisu +nsg I L Ngasa +nsh I L Ngoshie +nsi I L Nigerian Sign Language +nsk I L Naskapi +nsl I L Norwegian Sign Language +nsm I L Sumi Naga +nsn I L Nehan +nso nso nso I L Pedi +nsp I L Nepalese Sign Language +nsq I L Northern Sierra Miwok +nsr I L Maritime Sign Language +nss I L Nali +nst I L Tase Naga +nsu I L Sierra Negra Nahuatl +nsv I L Southwestern Nisu +nsw I L Navut +nsx I L Nsongo +nsy I L Nasal +nsz I L Nisenan +nte I L Nathembo +ntg I E Ngantangarra +nti I L Natioro +ntj I L Ngaanyatjarra +ntk I L Ikoma-Nata-Isenye +ntm I L Nateni +nto I L Ntomba +ntp I L Northern Tepehuan +ntr I L Delo +nts I E Natagaimas +ntu I L Natügu +ntw I E Nottoway +ntx I L Tangkhul Naga (Myanmar) +nty I L Mantsi +ntz I L Natanzi +nua I L Yuanga +nuc I E Nukuini +nud I L Ngala +nue I L Ngundu +nuf I L Nusu +nug I E Nungali +nuh I L Ndunda +nui I L Ngumbi +nuj I L Nyole +nuk I L Nuu-chah-nulth +nul I L Nusa Laut +num I L Niuafo'ou +nun I L Anong +nuo I L Nguôn +nup I L Nupe-Nupe-Tako +nuq I L Nukumanu +nur I L Nukuria +nus I L Nuer +nut I L Nung (Viet Nam) +nuu I L Ngbundu +nuv I L Northern Nuni +nuw I L Nguluwan +nux I L Mehek +nuy I L Nunggubuyu +nuz I L Tlamacazapa Nahuatl +nvh I L Nasarian +nvm I L Namiae +nvo I L Nyokon +nwa I E 
Nawathinehena +nwb I L Nyabwa +nwc nwc nwc I H Classical Newari +nwe I L Ngwe +nwg I E Ngayawung +nwi I L Southwest Tanna +nwm I L Nyamusa-Molo +nwo I E Nauo +nwr I L Nawaru +nwx I H Middle Newar +nwy I E Nottoway-Meherrin +nxa I L Nauete +nxd I L Ngando (Democratic Republic of Congo) +nxe I L Nage +nxg I L Ngad'a +nxi I L Nindi +nxk I L Koki Naga +nxl I L South Nuaulu +nxm I A Numidian +nxn I E Ngawun +nxq I L Naxi +nxr I L Ninggerum +nxu I E Narau +nxx I L Nafri +nya nya nya ny I L Nyanja +nyb I L Nyangbo +nyc I L Nyanga-li +nyd I L Nyore +nye I L Nyengo +nyf I L Giryama +nyg I L Nyindu +nyh I L Nyigina +nyi I L Ama (Sudan) +nyj I L Nyanga +nyk I L Nyaneka +nyl I L Nyeu +nym nym nym I L Nyamwezi +nyn nyn nyn I L Nyankole +nyo nyo nyo I L Nyoro +nyp I E Nyang'i +nyq I L Nayini +nyr I L Nyiha (Malawi) +nys I L Nyunga +nyt I E Nyawaygi +nyu I L Nyungwe +nyv I E Nyulnyul +nyw I L Nyaw +nyx I E Nganyaywana +nyy I L Nyakyusa-Ngonde +nza I L Tigon Mbembe +nzb I L Njebi +nzi nzi nzi I L Nzima +nzk I L Nzakara +nzm I L Zeme Naga +nzs I L New Zealand Sign Language +nzu I L Teke-Nzikou +nzy I L Nzakambay +nzz I L Nanga Dama Dogon +oaa I L Orok +oac I L Oroch +oar I A Old Aramaic (up to 700 BCE) +oav I H Old Avar +obi I E Obispeño +obk I L Southern Bontok +obl I L Oblo +obm I A Moabite +obo I L Obo Manobo +obr I H Old Burmese +obt I H Old Breton +obu I L Obulom +oca I L Ocaina +och I A Old Chinese +oci oci oci oc I L Occitan (post 1500) +oco I H Old Cornish +ocu I L Atzingo Matlatzinca +oda I L Odut +odk I L Od +odt I H Old Dutch +odu I L Odual +ofo I E Ofo +ofs I H Old Frisian +ofu I L Efutop +ogb I L Ogbia +ogc I L Ogbah +oge I H Old Georgian +ogg I L Ogbogolo +ogo I L Khana +ogu I L Ogbronuagum +oht I A Old Hittite +ohu I H Old Hungarian +oia I L Oirata +oin I L Inebu One +ojb I L Northwestern Ojibwa +ojc I L Central Ojibwa +ojg I L Eastern Ojibwa +oji oji oji oj M L Ojibwa +ojp I H Old Japanese +ojs I L Severn Ojibwa +ojv I L Ontong Java +ojw I L Western Ojibwa +oka I L 
Okanagan +okb I L Okobo +okd I L Okodia +oke I L Okpe (Southwestern Edo) +okg I E Koko Babangk +okh I L Koresh-e Rostam +oki I L Okiek +okj I E Oko-Juwoi +okk I L Kwamtim One +okl I E Old Kentish Sign Language +okm I H Middle Korean (10th-16th cent.) +okn I L Oki-No-Erabu +oko I H Old Korean (3rd-9th cent.) +okr I L Kirike +oks I L Oko-Eni-Osayen +oku I L Oku +okv I L Orokaiva +okx I L Okpe (Northwestern Edo) +ola I L Walungge +old I L Mochi +ole I L Olekha +olk I E Olkol +olm I L Oloma +olo I L Livvi +olr I L Olrat +oma I L Omaha-Ponca +omb I L East Ambae +omc I E Mochica +ome I E Omejes +omg I L Omagua +omi I L Omi +omk I E Omok +oml I L Ombo +omn I A Minoan +omo I L Utarmbung +omp I H Old Manipuri +omr I H Old Marathi +omt I L Omotik +omu I E Omurano +omw I L South Tairora +omx I H Old Mon +ona I L Ona +onb I L Lingao +one I L Oneida +ong I L Olo +oni I L Onin +onj I L Onjob +onk I L Kabore One +onn I L Onobasulu +ono I L Onondaga +onp I L Sartang +onr I L Northern One +ons I L Ono +ont I L Ontenu +onu I L Unua +onw I H Old Nubian +onx I L Onin Based Pidgin +ood I L Tohono O'odham +oog I L Ong +oon I L Önge +oor I L Oorlams +oos I A Old Ossetic +opa I L Okpamheri +opk I L Kopkaka +opm I L Oksapmin +opo I L Opao +opt I E Opata +opy I L Ofayé +ora I L Oroha +orc I L Orma +ore I L Orejón +org I L Oring +orh I L Oroqen +ori ori ori or M L Oriya (macrolanguage) +orm orm orm om M L Oromo +orn I L Orang Kanaq +oro I L Orokolo +orr I L Oruma +ors I L Orang Seletar +ort I L Adivasi Oriya +oru I L Ormuri +orv I H Old Russian +orw I L Oro Win +orx I L Oro +ory I L Oriya (individual language) +orz I L Ormu +osa osa osa I L Osage +osc I A Oscan +osi I L Osing +oso I L Ososo +osp I H Old Spanish +oss oss oss os I L Ossetian +ost I L Osatu +osu I L Southern One +osx I H Old Saxon +ota ota ota I H Ottoman Turkish (1500-1928) +otb I H Old Tibetan +otd I L Ot Danum +ote I L Mezquital Otomi +oti I E Oti +otk I H Old Turkish +otl I L Tilapa Otomi +otm I L Eastern Highland Otomi 
+otn I L Tenango Otomi +otq I L Querétaro Otomi +otr I L Otoro +ots I L Estado de México Otomi +ott I L Temoaya Otomi +otu I E Otuke +otw I L Ottawa +otx I L Texcatepec Otomi +oty I A Old Tamil +otz I L Ixtenco Otomi +oua I L Tagargrent +oub I L Glio-Oubi +oue I L Oune +oui I H Old Uighur +oum I E Ouma +oun I L !O!ung +owi I L Owiniga +owl I H Old Welsh +oyb I L Oy +oyd I L Oyda +oym I L Wayampi +oyy I L Oya'oya +ozm I L Koonzime +pab I L Parecís +pac I L Pacoh +pad I L Paumarí +pae I L Pagibete +paf I E Paranawát +pag pag pag I L Pangasinan +pah I L Tenharim +pai I L Pe +pak I L Parakanã +pal pal pal I A Pahlavi +pam pam pam I L Pampanga +pan pan pan pa I L Panjabi +pao I L Northern Paiute +pap pap pap I L Papiamento +paq I L Parya +par I L Panamint +pas I L Papasena +pat I L Papitalai +pau pau pau I L Palauan +pav I L Pakaásnovos +paw I L Pawnee +pax I E Pankararé +pay I L Pech +paz I E Pankararú +pbb I L Páez +pbc I L Patamona +pbe I L Mezontla Popoloca +pbf I L Coyotepec Popoloca +pbg I E Paraujano +pbh I L E'ñapa Woromaipu +pbi I L Parkwa +pbl I L Mak (Nigeria) +pbn I L Kpasam +pbo I L Papel +pbp I L Badyara +pbr I L Pangwa +pbs I L Central Pame +pbt I L Southern Pashto +pbu I L Northern Pashto +pbv I L Pnar +pby I L Pyu +pca I L Santa Inés Ahuatempan Popoloca +pcb I L Pear +pcc I L Bouyei +pcd I L Picard +pce I L Ruching Palaung +pcf I L Paliyan +pcg I L Paniya +pch I L Pardhan +pci I L Duruwa +pcj I L Parenga +pck I L Paite Chin +pcl I L Pardhi +pcm I L Nigerian Pidgin +pcn I L Piti +pcp I L Pacahuara +pcw I L Pyapun +pda I L Anam +pdc I L Pennsylvania German +pdi I L Pa Di +pdn I L Podena +pdo I L Padoe +pdt I L Plautdietsch +pdu I L Kayan +pea I L Peranakan Indonesian +peb I E Eastern Pomo +ped I L Mala (Papua New Guinea) +pee I L Taje +pef I E Northeastern Pomo +peg I L Pengo +peh I L Bonan +pei I L Chichimeca-Jonaz +pej I E Northern Pomo +pek I L Penchal +pel I L Pekal +pem I L Phende +peo peo peo I H Old Persian (ca. 600-400 B.C.) 
+pep I L Kunja +peq I L Southern Pomo +pes I L Iranian Persian +pev I L Pémono +pex I L Petats +pey I L Petjo +pez I L Eastern Penan +pfa I L Pááfang +pfe I L Peere +pfl I L Pfaelzisch +pga I L Sudanese Creole Arabic +pgg I L Pangwali +pgi I L Pagi +pgk I L Rerep +pgl I A Primitive Irish +pgn I A Paelignian +pgs I L Pangseng +pgu I L Pagu +pha I L Pa-Hng +phd I L Phudagi +phg I L Phuong +phh I L Phukha +phk I L Phake +phl I L Phalura +phm I L Phimbi +phn phn phn I A Phoenician +pho I L Phunoi +phq I L Phana' +phr I L Pahari-Potwari +pht I L Phu Thai +phu I L Phuan +phv I L Pahlavani +phw I L Phangduwali +pia I L Pima Bajo +pib I L Yine +pic I L Pinji +pid I L Piaroa +pie I E Piro +pif I L Pingelapese +pig I L Pisabo +pih I L Pitcairn-Norfolk +pii I L Pini +pij I E Pijao +pil I L Yom +pim I E Powhatan +pin I L Piame +pio I L Piapoco +pip I L Pero +pir I L Piratapuyo +pis I L Pijin +pit I E Pitta Pitta +piu I L Pintupi-Luritja +piv I L Pileni +piw I L Pimbwe +pix I L Piu +piy I L Piya-Kwonci +piz I L Pije +pjt I L Pitjantjatjara +pka I H Ardhamāgadhī Prākrit +pkb I L Pokomo +pkc I E Paekche +pkg I L Pak-Tong +pkh I L Pankhu +pkn I L Pakanha +pko I L Pökoot +pkp I L Pukapuka +pkr I L Attapady Kurumba +pks I L Pakistan Sign Language +pkt I L Maleng +pku I L Paku +pla I L Miani +plb I L Polonombauk +plc I L Central Palawano +pld I L Polari +ple I L Palu'e +plg I L Pilagá +plh I L Paulohi +pli pli pli pi I A Pali +plj I L Polci +plk I L Kohistani Shina +pll I L Shwe Palaung +pln I L Palenquero +plo I L Oluta Popoluca +plp I L Palpa +plq I A Palaic +plr I L Palaka Senoufo +pls I L San Marcos Tlalcoyalco Popoloca +plt I L Plateau Malagasy +plu I L Palikúr +plv I L Southwest Palawano +plw I L Brooke's Point Palawano +ply I L Bolyu +plz I L Paluan +pma I L Paama +pmb I L Pambia +pmc I E Palumata +pmd I E Pallanganmiddang +pme I L Pwaamei +pmf I L Pamona +pmh I H Māhārāṣṭri Prākrit +pmi I L Northern Pumi +pmj I L Southern Pumi +pmk I E Pamlico +pml I E Lingua Franca +pmm I L 
Pomo +pmn I L Pam +pmo I L Pom +pmq I L Northern Pame +pmr I L Paynamar +pms I L Piemontese +pmt I L Tuamotuan +pmu I L Mirpur Panjabi +pmw I L Plains Miwok +pmx I L Poumei Naga +pmy I L Papuan Malay +pmz I E Southern Pame +pna I L Punan Bah-Biau +pnb I L Western Panjabi +pnc I L Pannei +pne I L Western Penan +png I L Pongu +pnh I L Penrhyn +pni I L Aoheng +pnj I E Pinjarup +pnk I L Paunaka +pnl I L Paleni +pnm I L Punan Batu 1 +pnn I L Pinai-Hagahai +pno I E Panobo +pnp I L Pancana +pnq I L Pana (Burkina Faso) +pnr I L Panim +pns I L Ponosakan +pnt I L Pontic +pnu I L Jiongnai Bunu +pnv I L Pinigura +pnw I L Panytyima +pnx I L Phong-Kniang +pny I L Pinyin +pnz I L Pana (Central African Republic) +poc I L Poqomam +pod I E Ponares +poe I L San Juan Atzingo Popoloca +pof I L Poke +pog I E Potiguára +poh I L Poqomchi' +poi I L Highland Popoluca +pok I L Pokangá +pol pol pol pl I L Polish +pom I L Southeastern Pomo +pon pon pon I L Pohnpeian +poo I L Central Pomo +pop I L Pwapwâ +poq I L Texistepec Popoluca +por por por pt I L Portuguese +pos I L Sayula Popoluca +pot I L Potawatomi +pov I L Upper Guinea Crioulo +pow I L San Felipe Otlaltepec Popoloca +pox I E Polabian +poy I L Pogolo +ppa I L Pao +ppe I L Papi +ppi I L Paipai +ppk I L Uma +ppl I L Pipil +ppm I L Papuma +ppn I L Papapana +ppo I L Folopa +ppp I L Pelende +ppq I L Pei +pps I L San Luís Temalacayuca Popoloca +ppt I L Pare +ppu I E Papora +pqa I L Pa'a +pqm I L Malecite-Passamaquoddy +prb I L Lua' +prc I L Parachi +prd I L Parsi-Dari +pre I L Principense +prf I L Paranan +prg I L Prussian +prh I L Porohanon +pri I L Paicî +prk I L Parauk +prl I L Peruvian Sign Language +prm I L Kibiri +prn I L Prasuni +pro pro pro I H Old Provençal (to 1500) +prp I L Parsi +prq I L Ashéninka Perené +prr I E Puri +prs I L Dari +prt I L Phai +pru I L Puragi +prw I L Parawen +prx I L Purik +pry I L Pray 3 +prz I L Providencia Sign Language +psa I L Asue Awyu +psc I L Persian Sign Language +psd I L Plains Indian Sign Language 
+pse I L Central Malay +psg I L Penang Sign Language +psh I L Southwest Pashayi +psi I L Southeast Pashayi +psl I L Puerto Rican Sign Language +psm I E Pauserna +psn I L Panasuan +pso I L Polish Sign Language +psp I L Philippine Sign Language +psq I L Pasi +psr I L Portuguese Sign Language +pss I L Kaulong +pst I L Central Pashto +psu I H Sauraseni Prākrit +psw I L Port Sandwich +psy I E Piscataway +pta I L Pai Tavytera +pth I E Pataxó Hã-Ha-Hãe +pti I L Pintiini +ptn I L Patani +pto I L Zo'é +ptp I L Patep +ptr I L Piamatsina +ptt I L Enrekang +ptu I L Bambam +ptv I L Port Vato +ptw I E Pentlatch +pty I L Pathiya +pua I L Western Highland Purepecha +pub I L Purum +puc I L Punan Merap +pud I L Punan Aput +pue I L Puelche +puf I L Punan Merah +pug I L Phuie +pui I L Puinave +puj I L Punan Tubu +puk I L Pu Ko +pum I L Puma +puo I L Puoc +pup I L Pulabu +puq I E Puquina +pur I L Puruborá +pus pus pus ps M L Pushto +put I L Putoh +puu I L Punu +puw I L Puluwatese +pux I L Puare +puy I E Purisimeño +puz I L Purum Naga +pwa I L Pawaia +pwb I L Panawa +pwg I L Gapapaiwa +pwi I E Patwin +pwm I L Molbog +pwn I L Paiwan +pwo I L Pwo Western Karen +pwr I L Powari +pww I L Pwo Northern Karen +pxm I L Quetzaltepec Mixe +pye I L Pye Krumen +pym I L Fyam +pyn I L Poyanáwa +pys I L Paraguayan Sign Language +pyu I L Puyuma +pyx I A Pyu (Myanmar) +pyy I L Pyen +pzn I L Para Naga +qua I L Quapaw +qub I L Huallaga Huánuco Quechua +quc I L K'iche' +qud I L Calderón Highland Quichua +que que que qu M L Quechua +quf I L Lambayeque Quechua +qug I L Chimborazo Highland Quichua +quh I L South Bolivian Quechua +qui I L Quileute +quk I L Chachapoyas Quechua +qul I L North Bolivian Quechua +qum I L Sipacapense +qun I E Quinault +qup I L Southern Pastaza Quechua +quq I L Quinqui +qur I L Yanahuanca Pasco Quechua +qus I L Santiago del Estero Quichua +quv I L Sacapulteco +quw I L Tena Lowland Quichua +qux I L Yauyos Quechua +quy I L Ayacucho Quechua +quz I L Cusco Quechua +qva I L Ambo-Pasco 
Quechua +qvc I L Cajamarca Quechua +qve I L Eastern Apurímac Quechua +qvh I L Huamalíes-Dos de Mayo Huánuco Quechua +qvi I L Imbabura Highland Quichua +qvj I L Loja Highland Quichua +qvl I L Cajatambo North Lima Quechua +qvm I L Margos-Yarowilca-Lauricocha Quechua +qvn I L North Junín Quechua +qvo I L Napo Lowland Quechua +qvp I L Pacaraos Quechua +qvs I L San Martín Quechua +qvw I L Huaylla Wanca Quechua +qvy I L Queyu +qvz I L Northern Pastaza Quichua +qwa I L Corongo Ancash Quechua +qwc I H Classical Quechua +qwh I L Huaylas Ancash Quechua +qwm I E Kuman (Russia) +qws I L Sihuas Ancash Quechua +qwt I E Kwalhioqua-Tlatskanai +qxa I L Chiquián Ancash Quechua +qxc I L Chincha Quechua +qxh I L Panao Huánuco Quechua +qxl I L Salasaca Highland Quichua +qxn I L Northern Conchucos Ancash Quechua +qxo I L Southern Conchucos Ancash Quechua +qxp I L Puno Quechua +qxq I L Qashqa'i +qxr I L Cañar Highland Quichua +qxs I L Southern Qiang +qxt I L Santa Ana de Tusi Pasco Quechua +qxu I L Arequipa-La Unión Quechua +qxw I L Jauja Wanca Quechua +qya I C Quenya +qyp I E Quiripi +raa I L Dungmali +rab I L Camling +rac I L Rasawa +rad I L Rade +raf I L Western Meohang +rag I L Logooli +rah I L Rabha +rai I L Ramoaaina +raj raj raj M L Rajasthani +rak I L Tulu-Bohuai +ral I L Ralte +ram I L Canela +ran I L Riantana +rao I L Rao +rap rap rap I L Rapanui +raq I L Saam +rar rar rar I L Rarotongan +ras I L Tegali +rat I L Razajerdi +rau I L Raute +rav I L Sampang +raw I L Rawang +rax I L Rang +ray I L Rapa +raz I L Rahambuu +rbb I L Rumai Palaung +rbk I L Northern Bontok +rbl I L Miraya Bikol +rbp I E Barababaraba +rcf I L Réunion Creole French +rdb I L Rudbari +rea I L Rerau +reb I L Rembong +ree I L Rejang Kayan +reg I L Kara (Tanzania) +rei I L Reli +rej I L Rejang +rel I L Rendille +rem I E Remo +ren I L Rengao +rer I E Rer Bare +res I L Reshe +ret I L Retta +rey I L Reyesano +rga I L Roria +rge I L Romano-Greek +rgk I E Rangkas +rgn I L Romagnol +rgr I L Resígaro +rgs I L Southern 
Roglai +rgu I L Ringgou +rhg I L Rohingya +rhp I L Yahang +ria I L Riang (India) +rie I L Rien +rif I L Tarifit +ril I L Riang (Myanmar) +rim I L Nyaturu +rin I L Nungu +rir I L Ribun +rit I L Ritarungo +riu I L Riung +rjg I L Rajong +rji I L Raji +rjs I L Rajbanshi +rka I L Kraol +rkb I L Rikbaktsa +rkh I L Rakahanga-Manihiki +rki I L Rakhine +rkm I L Marka +rkt I L Rangpuri +rkw I E Arakwal +rma I L Rama +rmb I L Rembarunga +rmc I L Carpathian Romani +rmd I E Traveller Danish +rme I L Angloromani +rmf I L Kalo Finnish Romani +rmg I L Traveller Norwegian +rmh I L Murkim +rmi I L Lomavren +rmk I L Romkun +rml I L Baltic Romani +rmm I L Roma +rmn I L Balkan Romani +rmo I L Sinte Romani +rmp I L Rempi +rmq I L Caló +rms I L Romanian Sign Language +rmt I L Domari +rmu I L Tavringer Romani +rmv I C Romanova +rmw I L Welsh Romani +rmx I L Romam +rmy I L Vlax Romani +rmz I L Marma +rna I E Runa +rnd I L Ruund +rng I L Ronga +rnl I L Ranglong +rnn I L Roon +rnp I L Rongpo +rnr I E Nari Nari +rnw I L Rungwa +rob I L Tae' +roc I L Cacgia Roglai +rod I L Rogo +roe I L Ronji +rof I L Rombo +rog I L Northern Roglai +roh roh roh rm I L Romansh +rol I L Romblomanon +rom rom rom M L Romany +ron rum ron ro I L Romanian +roo I L Rotokas +rop I L Kriol +ror I L Rongga +rou I L Runga +row I L Dela-Oenale +rpn I L Repanbitip +rpt I L Rapting +rri I L Ririo +rro I L Waima +rrt I E Arritinngithigh +rsb I L Romano-Serbian +rsi I L Rennellese Sign Language +rsl I L Russian Sign Language +rtc I L Rungtu Chin +rth I L Ratahan +rtm I L Rotuman +rtw I L Rathawi +rub I L Gungu +ruc I L Ruuli +rue I L Rusyn +ruf I L Luguru +rug I L Roviana +ruh I L Ruga +rui I L Rufiji +ruk I L Che +run run run rn I L Rundi +ruo I L Istro Romanian +rup rup rup I L Macedo-Romanian +ruq I L Megleno Romanian +rus rus rus ru I L Russian +rut I L Rutul +ruu I L Lanas Lobu +ruy I L Mala (Nigeria) +ruz I L Ruma +rwa I L Rawo +rwk I L Rwa +rwm I L Amba (Uganda) +rwo I L Rawa +rwr I L Marwari (India) +rxd I L Ngardi 
+rxw I E Karuwali +ryn I L Northern Amami-Oshima +rys I L Yaeyama +ryu I L Central Okinawan +saa I L Saba +sab I L Buglere +sac I L Meskwaki +sad sad sad I L Sandawe +sae I L Sabanê +saf I L Safaliba +sag sag sag sg I L Sango +sah sah sah I L Yakut +saj I L Sahu +sak I L Sake +sam sam sam I E Samaritan Aramaic +san san san sa I A Sanskrit +sao I L Sause +sap I L Sanapaná +saq I L Samburu +sar I E Saraveca +sas sas sas I L Sasak +sat sat sat I L Santali +sau I L Saleman +sav I L Saafi-Saafi +saw I L Sawi +sax I L Sa +say I L Saya +saz I L Saurashtra +sba I L Ngambay +sbb I L Simbo +sbc I L Kele (Papua New Guinea) +sbd I L Southern Samo +sbe I L Saliba +sbf I L Shabo +sbg I L Seget +sbh I L Sori-Harengan +sbi I L Seti +sbj I L Surbakhal +sbk I L Safwa +sbl I L Botolan Sambal +sbm I L Sagala +sbn I L Sindhi Bhil +sbo I L Sabüm +sbp I L Sangu (Tanzania) +sbq I L Sileibi +sbr I L Sembakung Murut +sbs I L Subiya +sbt I L Kimki +sbu I L Stod Bhoti +sbv I A Sabine +sbw I L Simba +sbx I L Seberuang +sby I L Soli +sbz I L Sara Kaba +scb I L Chut +sce I L Dongxiang +scf I L San Miguel Creole French +scg I L Sanggau +sch I L Sakachep +sci I L Sri Lankan Creole Malay +sck I L Sadri +scl I L Shina +scn scn scn I L Sicilian +sco sco sco I L Scots +scp I L Helambu Sherpa +scq I L Sa'och +scs I L North Slavey +scu I L Shumcho +scv I L Sheni +scw I L Sha +scx I A Sicel +sda I L Toraja-Sa'dan +sdb I L Shabak +sdc I L Sassarese Sardinian +sde I L Surubu +sdf I L Sarli +sdg I L Savi +sdh I L Southern Kurdish +sdj I L Suundi +sdk I L Sos Kundi +sdl I L Saudi Arabian Sign Language +sdm I L Semandang +sdn I L Gallurese Sardinian +sdo I L Bukar-Sadung Bidayuh +sdp I L Sherdukpen +sdr I L Oraon Sadri +sds I E Sened +sdt I E Shuadit +sdu I L Sarudu +sdx I L Sibu Melanau +sdz I L Sallands +sea I L Semai +seb I L Shempire Senoufo +sec I L Sechelt +sed I L Sedang +see I L Seneca +sef I L Cebaara Senoufo +seg I L Segeju +seh I L Sena +sei I L Seri +sej I L Sene +sek I L Sekani +sel sel sel I L 
Selkup +sen I L Nanerigé Sénoufo +seo I L Suarmin +sep I L Sìcìté Sénoufo +seq I L Senara Sénoufo +ser I L Serrano +ses I L Koyraboro Senni Songhai +set I L Sentani +seu I L Serui-Laut +sev I L Nyarafolo Senoufo +sew I L Sewa Bay +sey I L Secoya +sez I L Senthang Chin +sfb I L Langue des signes de Belgique Francophone +sfe I L Eastern Subanen +sfm I L Small Flowery Miao +sfs I L South African Sign Language +sfw I L Sehwi +sga sga sga I H Old Irish (to 900) +sgb I L Mag-antsi Ayta +sgc I L Kipsigis +sgd I L Surigaonon +sge I L Segai +sgg I L Swiss-German Sign Language +sgh I L Shughni +sgi I L Suga +sgj I L Surgujia +sgk I L Sangkong +sgm I E Singa +sgo I L Songa +sgp I L Singpho +sgr I L Sangisari +sgs I L Samogitian +sgt I L Brokpake +sgu I L Salas +sgw I L Sebat Bet Gurage +sgx I L Sierra Leone Sign Language +sgy I L Sanglechi +sgz I L Sursurunga +sha I L Shall-Zwall +shb I L Ninam +shc I L Sonde +shd I L Kundal Shahi +she I L Sheko +shg I L Shua +shh I L Shoshoni +shi I L Tachelhit +shj I L Shatt +shk I L Shilluk +shl I L Shendu +shm I L Shahrudi +shn shn shn I L Shan +sho I L Shanga +shp I L Shipibo-Conibo +shq I L Sala +shr I L Shi +shs I L Shuswap +sht I E Shasta +shu I L Chadian Arabic +shv I L Shehri +shw I L Shwai +shx I L She +shy I L Tachawit +shz I L Syenara Senoufo +sia I E Akkala Sami +sib I L Sebop +sid sid sid I L Sidamo +sie I L Simaa +sif I L Siamou +sig I L Paasaal +sih I L Zire +sii I L Shom Peng +sij I L Numbami +sik I L Sikiana +sil I L Tumulung Sisaala +sim I L Mende (Papua New Guinea) +sin sin sin si I L Sinhala +sip I L Sikkimese +siq I L Sonia +sir I L Siri +sis I E Siuslaw +siu I L Sinagen +siv I L Sumariup +siw I L Siwai +six I L Sumau +siy I L Sivandi +siz I L Siwi +sja I L Epena +sjb I L Sajau Basap +sjd I L Kildin Sami +sje I L Pite Sami +sjg I L Assangori +sjk I E Kemi Sami +sjl I L Sajalong +sjm I L Mapun +sjn I C Sindarin +sjo I L Xibe +sjp I L Surjapuri +sjr I L Siar-Lak +sjs I E Senhaja De Srair +sjt I L Ter Sami +sju I L Ume 
Sami +sjw I L Shawnee +ska I L Skagit +skb I L Saek +skc I L Ma Manda +skd I L Southern Sierra Miwok +ske I L Seke (Vanuatu) +skf I L Sakirabiá +skg I L Sakalava Malagasy +skh I L Sikule +ski I L Sika +skj I L Seke (Nepal) +skk I L Sok +skm I L Kutong +skn I L Kolibugan Subanon +sko I L Seko Tengah +skp I L Sekapan +skq I L Sininkere +skr I L Seraiki +sks I L Maia +skt I L Sakata +sku I L Sakao +skv I L Skou +skw I E Skepi Creole Dutch +skx I L Seko Padang +sky I L Sikaiana +skz I L Sekar +slc I L Sáliba +sld I L Sissala +sle I L Sholaga +slf I L Swiss-Italian Sign Language +slg I L Selungai Murut +slh I L Southern Puget Sound Salish +sli I L Lower Silesian +slj I L Salumá +slk slo slk sk I L Slovak +sll I L Salt-Yui +slm I L Pangutaran Sama +sln I E Salinan +slp I L Lamaholot +slq I L Salchuq +slr I L Salar +sls I L Singapore Sign Language +slt I L Sila +slu I L Selaru +slv slv slv sl I L Slovenian +slw I L Sialum +slx I L Salampasu +sly I L Selayar +slz I L Ma'ya +sma sma sma I L Southern Sami +smb I L Simbari +smc I E Som +smd I L Sama +sme sme sme se I L Northern Sami +smf I L Auwe +smg I L Simbali +smh I L Samei +smj smj smj I L Lule Sami +smk I L Bolinao +sml I L Central Sama +smm I L Musasa +smn smn smn I L Inari Sami +smo smo smo sm I L Samoan +smp I E Samaritan +smq I L Samo +smr I L Simeulue +sms sms sms I L Skolt Sami +smt I L Simte +smu I E Somray +smv I L Samvedi +smw I L Sumbawa +smx I L Samba +smy I L Semnani +smz I L Simeku +sna sna sna sn I L Shona +snb I L Sebuyau +snc I L Sinaugoro +snd snd snd sd I L Sindhi +sne I L Bau Bidayuh +snf I L Noon +sng I L Sanga (Democratic Republic of Congo) +snh I E Shinabo +sni I E Sensi +snj I L Riverain Sango +snk snk snk I L Soninke +snl I L Sangil +snm I L Southern Ma'di +snn I L Siona +sno I L Snohomish +snp I L Siane +snq I L Sangu (Gabon) +snr I L Sihan +sns I L South West Bay +snu I L Senggi +snv I L Sa'ban +snw I L Selee +snx I L Sam +sny I L Saniyo-Hiyewe +snz I L Sinsauru +soa I L Thai Song +sob I L 
Sobei +soc I L So (Democratic Republic of Congo) +sod I L Songoora +soe I L Songomeno +sog sog sog I A Sogdian +soh I L Aka +soi I L Sonha +soj I L Soi +sok I L Sokoro +sol I L Solos +som som som so I L Somali +soo I L Songo +sop I L Songe +soq I L Kanasi +sor I L Somrai +sos I L Seeku +sot sot sot st I L Southern Sotho +sou I L Southern Thai +sov I L Sonsorol +sow I L Sowanda +sox I L Swo +soy I L Miyobe +soz I L Temi +spa spa spa es I L Spanish +spb I L Sepa (Indonesia) +spc I L Sapé +spd I L Saep +spe I L Sepa (Papua New Guinea) +spg I L Sian +spi I L Saponi +spk I L Sengo +spl I L Selepet +spm I L Akukem +spo I L Spokane +spp I L Supyire Senoufo +spq I L Loreto-Ucayali Spanish +spr I L Saparua +sps I L Saposa +spt I L Spiti Bhoti +spu I L Sapuan +spv I L Sambalpuri +spx I A South Picene +spy I L Sabaot +sqa I L Shama-Sambuga +sqh I L Shau +sqi alb sqi sq M L Albanian +sqk I L Albanian Sign Language +sqm I L Suma +sqn I E Susquehannock +sqo I L Sorkhei +sqq I L Sou +sqr I H Siculo Arabic +sqs I L Sri Lankan Sign Language +sqt I L Soqotri +squ I L Squamish +sra I L Saruga +srb I L Sora +src I L Logudorese Sardinian +srd srd srd sc M L Sardinian +sre I L Sara +srf I L Nafi +srg I L Sulod +srh I L Sarikoli +sri I L Siriano +srk I L Serudung Murut +srl I L Isirawa +srm I L Saramaccan +srn srn srn I L Sranan Tongo +sro I L Campidanese Sardinian +srp srp srp sr I L Serbian +srq I L Sirionó +srr srr srr I L Serer +srs I L Sarsi +srt I L Sauri +sru I L Suruí +srv I L Southern Sorsoganon +srw I L Serua +srx I L Sirmauri +sry I L Sera +srz I L Shahmirzadi +ssb I L Southern Sama +ssc I L Suba-Simbiti +ssd I L Siroi +sse I L Balangingi +ssf I L Thao +ssg I L Seimat +ssh I L Shihhi Arabic +ssi I L Sansi +ssj I L Sausi +ssk I L Sunam +ssl I L Western Sisaala +ssm I L Semnam +ssn I L Waata +sso I L Sissano +ssp I L Spanish Sign Language +ssq I L So'a +ssr I L Swiss-French Sign Language +sss I L Sô +sst I L Sinasina +ssu I L Susuami +ssv I L Shark Bay +ssw ssw ssw ss I L Swati 
+ssx I L Samberigi +ssy I L Saho +ssz I L Sengseng +sta I L Settla +stb I L Northern Subanen +std I L Sentinel +ste I L Liana-Seti +stf I L Seta +stg I L Trieng +sth I L Shelta +sti I L Bulo Stieng +stj I L Matya Samo +stk I L Arammba +stl I L Stellingwerfs +stm I L Setaman +stn I L Owa +sto I L Stoney +stp I L Southeastern Tepehuan +stq I L Saterfriesisch +str I L Straits Salish +sts I L Shumashti +stt I L Budeh Stieng +stu I L Samtao +stv I L Silt'e +stw I L Satawalese +sty I L Siberian Tatar +sua I L Sulka +sub I L Suku +suc I L Western Subanon +sue I L Suena +sug I L Suganga +sui I L Suki +suj I L Shubi +suk suk suk I L Sukuma +sun sun sun su I L Sundanese +suq I L Suri +sur I L Mwaghavul +sus sus sus I L Susu +sut I E Subtiaba +suv I L Puroik +suw I L Sumbwa +sux sux sux I A Sumerian +suy I L Suyá +suz I L Sunwar +sva I L Svan +svb I L Ulau-Suain +svc I L Vincentian Creole English +sve I L Serili +svk I L Slovakian Sign Language +svm I L Slavomolisano +svr I L Savara +svs I L Savosavo +svx I E Skalvian +swa swa swa sw M L Swahili (macrolanguage) +swb I L Maore Comorian +swc I L Congo Swahili +swe swe swe sv I L Swedish +swf I L Sere +swg I L Swabian +swh I L Swahili (individual language) +swi I L Sui +swj I L Sira +swk I L Malawi Sena +swl I L Swedish Sign Language +swm I L Samosa +swn I L Sawknah +swo I L Shanenawa +swp I L Suau +swq I L Sharwa +swr I L Saweru +sws I L Seluwasan +swt I L Sawila +swu I L Suwawa +swv I L Shekhawati +sww I E Sowa +swx I L Suruahá +swy I L Sarua +sxb I L Suba +sxc I A Sicanian +sxe I L Sighu +sxg I L Shixing +sxk I E Southern Kalapuya +sxl I E Selian +sxm I L Samre +sxn I L Sangir +sxo I A Sorothaptic +sxr I L Saaroa +sxs I L Sasaru +sxu I L Upper Saxon +sxw I L Saxwe Gbe +sya I L Siang +syb I L Central Subanen +syc syc syc I H Classical Syriac +syi I L Seki +syk I L Sukur +syl I L Sylheti +sym I L Maya Samo +syn I L Senaya +syo I L Suoy +syr syr syr M L Syriac +sys I L Sinyar +syw I L Kagate +syy I L Al-Sayyid Bedouin Sign 
Language +sza I L Semelai +szb I L Ngalum +szc I L Semaq Beri +szd I E Seru +sze I L Seze +szg I L Sengele +szl I L Silesian +szn I L Sula +szp I L Suabo +szv I L Isu (Fako Division) +szw I L Sawai +taa I L Lower Tanana +tab I L Tabassaran +tac I L Lowland Tarahumara +tad I L Tause +tae I L Tariana +taf I L Tapirapé +tag I L Tagoi +tah tah tah ty I L Tahitian +taj I L Eastern Tamang +tak I L Tala +tal I L Tal +tam tam tam ta I L Tamil +tan I L Tangale +tao I L Yami +tap I L Taabwa +taq I L Tamasheq +tar I L Central Tarahumara +tas I E Tay Boi +tat tat tat tt I L Tatar +tau I L Upper Tanana +tav I L Tatuyo +taw I L Tai +tax I L Tamki +tay I L Atayal +taz I L Tocho +tba I L Aikanã +tbb I E Tapeba +tbc I L Takia +tbd I L Kaki Ae +tbe I L Tanimbili +tbf I L Mandara +tbg I L North Tairora +tbh I E Thurawal +tbi I L Gaam +tbj I L Tiang +tbk I L Calamian Tagbanwa +tbl I L Tboli +tbm I L Tagbu +tbn I L Barro Negro Tunebo +tbo I L Tawala +tbp I L Taworta +tbr I L Tumtum +tbs I L Tanguat +tbt I L Tembo (Kitembo) +tbu I E Tubar +tbv I L Tobo +tbw I L Tagbanwa +tbx I L Kapin +tby I L Tabaru +tbz I L Ditammari +tca I L Ticuna +tcb I L Tanacross +tcc I L Datooga +tcd I L Tafi +tce I L Southern Tutchone +tcf I L Malinaltepec Me'phaa +tcg I L Tamagario +tch I L Turks And Caicos Creole English +tci I L Wára +tck I L Tchitchege +tcl I E Taman (Myanmar) +tcm I L Tanahmerah +tcn I L Tichurong +tco I L Taungyo +tcp I L Tawr Chin +tcq I L Kaiy +tcs I L Torres Strait Creole +tct I L T'en +tcu I L Southeastern Tarahumara +tcw I L Tecpatlán Totonac +tcx I L Toda +tcy I L Tulu +tcz I L Thado Chin +tda I L Tagdal +tdb I L Panchpargania +tdc I L Emberá-Tadó +tdd I L Tai Nüa +tde I L Tiranige Diga Dogon +tdf I L Talieng +tdg I L Western Tamang +tdh I L Thulung +tdi I L Tomadino +tdj I L Tajio +tdk I L Tambas +tdl I L Sur +tdn I L Tondano +tdo I L Teme +tdq I L Tita +tdr I L Todrah +tds I L Doutai +tdt I L Tetun Dili +tdu I L Tempasuk Dusun +tdv I L Toro +tdx I L Tandroy-Mahafaly Malagasy +tdy 
I L Tadyawan +tea I L Temiar +teb I E Tetete +tec I L Terik +ted I L Tepo Krumen +tee I L Huehuetla Tepehua +tef I L Teressa +teg I L Teke-Tege +teh I L Tehuelche +tei I L Torricelli +tek I L Ibali Teke +tel tel tel te I L Telugu +tem tem tem I L Timne +ten I E Tama (Colombia) +teo I L Teso +tep I E Tepecano +teq I L Temein +ter ter ter I L Tereno +tes I L Tengger +tet tet tet I L Tetum +teu I L Soo +tev I L Teor +tew I L Tewa (USA) +tex I L Tennet +tey I L Tulishi +tfi I L Tofin Gbe +tfn I L Tanaina +tfo I L Tefaro +tfr I L Teribe +tft I L Ternate +tga I L Sagalla +tgb I L Tobilung +tgc I L Tigak +tgd I L Ciwogai +tge I L Eastern Gorkha Tamang +tgf I L Chalikha +tgh I L Tobagonian Creole English +tgi I L Lawunuia +tgj I L Tagin +tgk tgk tgk tg I L Tajik +tgl tgl tgl tl I L Tagalog +tgn I L Tandaganon +tgo I L Sudest +tgp I L Tangoa +tgq I L Tring +tgr I L Tareng +tgs I L Nume +tgt I L Central Tagbanwa +tgu I L Tanggu +tgv I E Tingui-Boto +tgw I L Tagwana Senoufo +tgx I L Tagish +tgy I E Togoyo +tgz I E Tagalaka +tha tha tha th I L Thai +thc I L Tai Hang Tong +thd I L Thayore +the I L Chitwania Tharu +thf I L Thangmi +thh I L Northern Tarahumara +thi I L Tai Long +thk I L Tharaka +thl I L Dangaura Tharu +thm I L Aheu +thn I L Thachanadan +thp I L Thompson +thq I L Kochila Tharu +thr I L Rana Tharu +ths I L Thakali +tht I L Tahltan +thu I L Thuri +thv I L Tahaggart Tamahaq +thw I L Thudam +thx I L The +thy I L Tha +thz I L Tayart Tamajeq +tia I L Tidikelt Tamazight +tic I L Tira +tid I L Tidong +tif I L Tifal +tig tig tig I L Tigre +tih I L Timugon Murut +tii I L Tiene +tij I L Tilung +tik I L Tikar +til I E Tillamook +tim I L Timbe +tin I L Tindi +tio I L Teop +tip I L Trimuris +tiq I L Tiéfo +tir tir tir ti I L Tigrinya +tis I L Masadiit Itneg +tit I L Tinigua +tiu I L Adasen +tiv tiv tiv I L Tiv +tiw I L Tiwi +tix I L Southern Tiwa +tiy I L Tiruray +tiz I L Tai Hongjin +tja I L Tajuasohn +tjg I L Tunjung +tji I L Northern Tujia +tjl I L Tai Laing +tjm I E Timucua 
+tjn I E Tonjon +tjo I L Temacine Tamazight +tjs I L Southern Tujia +tju I E Tjurruru +tjw I L Djabwurrung +tka I E Truká +tkb I L Buksa +tkd I L Tukudede +tke I L Takwane +tkf I E Tukumanféd +tkg I L Tesaka Malagasy +tkl tkl tkl I L Tokelau +tkm I E Takelma +tkn I L Toku-No-Shima +tkp I L Tikopia +tkq I L Tee +tkr I L Tsakhur +tks I L Takestani +tkt I L Kathoriya Tharu +tku I L Upper Necaxa Totonac +tkw I L Teanu +tkx I L Tangko +tkz I L Takua +tla I L Southwestern Tepehuan +tlb I L Tobelo +tlc I L Yecuatla Totonac +tld I L Talaud +tlf I L Telefol +tlg I L Tofanma +tlh tlh tlh I C Klingon +tli tli tli I L Tlingit +tlj I L Talinga-Bwisi +tlk I L Taloki +tll I L Tetela +tlm I L Tolomako +tln I L Talondo' +tlo I L Talodi +tlp I L Filomena Mata-Coahuitlán Totonac +tlq I L Tai Loi +tlr I L Talise +tls I L Tambotalo +tlt I L Teluti +tlu I L Tulehu +tlv I L Taliabu +tlx I L Khehek +tly I L Talysh +tma I L Tama (Chad) +tmb I L Katbol +tmc I L Tumak +tmd I L Haruai +tme I E Tremembé +tmf I L Toba-Maskoy +tmg I E Ternateño +tmh tmh tmh M L Tamashek +tmi I L Tutuba +tmj I L Samarokena +tmk I L Northwestern Tamang +tml I L Tamnim Citak +tmm I L Tai Thanh +tmn I L Taman (Indonesia) +tmo I L Temoq +tmp I L Tai Mène +tmq I L Tumleo +tmr I E Jewish Babylonian Aramaic (ca. 
200-1200 CE) +tms I L Tima +tmt I L Tasmate +tmu I L Iau +tmv I L Tembo (Motembo) +tmw I L Temuan +tmy I L Tami +tmz I E Tamanaku +tna I L Tacana +tnb I L Western Tunebo +tnc I L Tanimuca-Retuarã +tnd I L Angosturas Tunebo +tne I L Tinoc Kallahan +tng I L Tobanga +tnh I L Maiani +tni I L Tandia +tnk I L Kwamera +tnl I L Lenakel +tnm I L Tabla +tnn I L North Tanna +tno I L Toromono +tnp I L Whitesands +tnq I E Taino +tnr I L Ménik +tns I L Tenis +tnt I L Tontemboan +tnu I L Tay Khang +tnv I L Tangchangya +tnw I L Tonsawang +tnx I L Tanema +tny I L Tongwe +tnz I L Tonga (Thailand) +tob I L Toba +toc I L Coyutla Totonac +tod I L Toma +toe I E Tomedes +tof I L Gizrra +tog tog tog I L Tonga (Nyasa) +toh I L Gitonga +toi I L Tonga (Zambia) +toj I L Tojolabal +tol I L Tolowa +tom I L Tombulu +ton ton ton to I L Tonga (Tonga Islands) +too I L Xicotepec De Juárez Totonac +top I L Papantla Totonac +toq I L Toposa +tor I L Togbo-Vara Banda +tos I L Highland Totonac +tou I L Tho +tov I L Upper Taromi +tow I L Jemez +tox I L Tobian +toy I L Topoiyo +toz I L To +tpa I L Taupota +tpc I L Azoyú Me'phaa +tpe I L Tippera +tpf I L Tarpia +tpg I L Kula +tpi tpi tpi I L Tok Pisin +tpj I L Tapieté +tpk I E Tupinikin +tpl I L Tlacoapa Me'phaa +tpm I L Tampulma +tpn I E Tupinambá +tpo I L Tai Pao +tpp I L Pisaflores Tepehua +tpq I L Tukpa +tpr I L Tuparí +tpt I L Tlachichilco Tepehua +tpu I L Tampuan +tpv I L Tanapag +tpw I E Tupí +tpx I L Acatepec Me'phaa +tpy I L Trumai +tpz I L Tinputz +tqb I L Tembé +tql I L Lehali +tqm I L Turumsa +tqn I L Tenino +tqo I L Toaripi +tqp I L Tomoip +tqq I L Tunni +tqr I E Torona +tqt I L Western Totonac +tqu I L Touo +tqw I E Tonkawa +tra I L Tirahi +trb I L Terebu +trc I L Copala Triqui +trd I L Turi +tre I L East Tarangan +trf I L Trinidadian Creole English +trg I L Lishán Didán +trh I L Turaka +tri I L Trió +trj I L Toram +trl I L Traveller Scottish +trm I L Tregami +trn I L Trinitario +tro I L Tarao Naga +trp I L Kok Borok +trq I L San Martín 
Itunyoso Triqui +trr I L Taushiro +trs I L Chicahuaxtla Triqui +trt I L Tunggare +tru I L Turoyo +trv I L Taroko +trw I L Torwali +trx I L Tringgus-Sembaan Bidayuh +try I E Turung +trz I E Torá +tsa I L Tsaangi +tsb I L Tsamai +tsc I L Tswa +tsd I L Tsakonian +tse I L Tunisian Sign Language +tsf I L Southwestern Tamang +tsg I L Tausug +tsh I L Tsuvan +tsi tsi tsi I L Tsimshian +tsj I L Tshangla +tsk I L Tseku +tsl I L Ts'ün-Lao +tsm I L Turkish Sign Language +tsn tsn tsn tn I L Tswana +tso tso tso ts I L Tsonga +tsp I L Northern Toussian +tsq I L Thai Sign Language +tsr I L Akei +tss I L Taiwan Sign Language +tst I L Tondi Songway Kiini +tsu I L Tsou +tsv I L Tsogo +tsw I L Tsishingini +tsx I L Mubami +tsy I L Tebul Sign Language +tsz I L Purepecha +tta I E Tutelo +ttb I L Gaa +ttc I L Tektiteko +ttd I L Tauade +tte I L Bwanabwana +ttf I L Tuotomb +ttg I L Tutong +tth I L Upper Ta'oih +tti I L Tobati +ttj I L Tooro +ttk I L Totoro +ttl I L Totela +ttm I L Northern Tutchone +ttn I L Towei +tto I L Lower Ta'oih +ttp I L Tombelala +ttq I L Tawallammat Tamajaq +ttr I L Tera +tts I L Northeastern Thai +ttt I L Muslim Tat +ttu I L Torau +ttv I L Titan +ttw I L Long Wat +tty I L Sikaritai +ttz I L Tsum +tua I L Wiarumus +tub I L Tübatulabal +tuc I L Mutu +tud I E Tuxá +tue I L Tuyuca +tuf I L Central Tunebo +tug I L Tunia +tuh I L Taulil +tui I L Tupuri +tuj I L Tugutil +tuk tuk tuk tk I L Turkmen +tul I L Tula +tum tum tum I L Tumbuka +tun I E Tunica +tuo I L Tucano +tuq I L Tedaga +tur tur tur tr I L Turkish +tus I L Tuscarora +tuu I L Tututni +tuv I L Turkana +tux I E Tuxináwa +tuy I L Tugen +tuz I L Turka +tva I L Vaghua +tvd I L Tsuvadi +tve I L Te'un +tvk I L Southeast Ambrym +tvl tvl tvl I L Tuvalu +tvm I L Tela-Masbuar +tvn I L Tavoyan +tvo I L Tidore +tvs I L Taveta +tvt I L Tutsa Naga +tvu I L Tunen +tvw I L Sedoa +tvy I E Timor Pidgin +twa I E Twana +twb I L Western Tawbuid +twc I E Teshenawa +twd I L Twents +twe I L Tewa (Indonesia) +twf I L Northern Tiwa +twg 
I L Tereweng +twh I L Tai Dón +twi twi twi tw I L Twi +twl I L Tawara +twm I L Tawang Monpa +twn I L Twendi +two I L Tswapong +twp I L Ere +twq I L Tasawaq +twr I L Southwestern Tarahumara +twt I E Turiwára +twu I L Termanu +tww I L Tuwari +twx I L Tewe +twy I L Tawoyan +txa I L Tombonuo +txb I A Tokharian B +txc I E Tsetsaut +txe I L Totoli +txg I A Tangut +txh I A Thracian +txi I L Ikpeng +txm I L Tomini +txn I L West Tarangan +txo I L Toto +txq I L Tii +txr I A Tartessian +txs I L Tonsea +txt I L Citak +txu I L Kayapó +txx I L Tatana +txy I L Tanosy Malagasy +tya I L Tauya +tye I L Kyanga +tyh I L O'du +tyi I L Teke-Tsaayi +tyj I L Tai Do +tyl I L Thu Lao +tyn I L Kombai +typ I E Thaypan +tyr I L Tai Daeng +tys I L Tày Sa Pa +tyt I L Tày Tac +tyu I L Kua +tyv tyv tyv I L Tuvinian +tyx I L Teke-Tyee +tyz I L Tày +tza I L Tanzanian Sign Language +tzh I L Tzeltal +tzj I L Tz'utujil +tzl I C Talossan +tzm I L Central Atlas Tamazight +tzn I L Tugun +tzo I L Tzotzil +tzx I L Tabriak +uam I E Uamué +uan I L Kuan +uar I L Tairuma +uba I L Ubang +ubi I L Ubi +ubl I L Buhi'non Bikol +ubr I L Ubir +ubu I L Umbu-Ungu +uby I E Ubykh +uda I L Uda +ude I L Udihe +udg I L Muduga +udi I L Udi +udj I L Ujir +udl I L Wuzlam +udm udm udm I L Udmurt +udu I L Uduk +ues I L Kioko +ufi I L Ufim +uga uga uga I A Ugaritic +ugb I E Kuku-Ugbanh +uge I L Ughele +ugn I L Ugandan Sign Language +ugo I L Ugong +ugy I L Uruguayan Sign Language +uha I L Uhami +uhn I L Damal +uig uig uig ug I L Uighur +uis I L Uisai +uiv I L Iyive +uji I L Tanjijili +uka I L Kaburi +ukg I L Ukuriguma +ukh I L Ukhwejo +ukl I L Ukrainian Sign Language +ukp I L Ukpe-Bayobiri +ukq I L Ukwa +ukr ukr ukr uk I L Ukrainian +uks I L Urubú-Kaapor Sign Language +uku I L Ukue +ukw I L Ukwuani-Aboh-Ndoni +uky I E Kuuk-Yak +ula I L Fungwa +ulb I L Ulukwumi +ulc I L Ulch +ule I E Lule +ulf I L Usku +uli I L Ulithian +ulk I L Meriam +ull I L Ullatan +ulm I L Ulumanda' +uln I L Unserdeutsch +ulu I L Uma' Lung +ulw I L Ulwa +uma I 
L Umatilla +umb umb umb I L Umbundu +umc I A Marrucinian +umd I E Umbindhamu +umg I E Umbuygamu +umi I L Ukit +umm I L Umon +umn I L Makyan Naga +umo I E Umotína +ump I L Umpila +umr I E Umbugarla +ums I L Pendau +umu I L Munsee +una I L North Watut +und und und S S Undetermined +une I L Uneme +ung I L Ngarinyin +unk I L Enawené-Nawé +unm I E Unami +unn I L Kurnai +unr I L Mundari +unu I L Unubahe +unx I L Munda +unz I L Unde Kaili +uok I L Uokha +upi I L Umeda +upv I L Uripiv-Wala-Rano-Atchin +ura I L Urarina +urb I L Urubú-Kaapor +urc I E Urningangg +urd urd urd ur I L Urdu +ure I L Uru +urf I E Uradhi +urg I L Urigina +urh I L Urhobo +uri I L Urim +urk I L Urak Lawoi' +url I L Urali +urm I L Urapmin +urn I L Uruangnirin +uro I L Ura (Papua New Guinea) +urp I L Uru-Pa-In +urr I L Lehalurup +urt I L Urat +uru I E Urumi +urv I E Uruava +urw I L Sop +urx I L Urimo +ury I L Orya +urz I L Uru-Eu-Wau-Wau +usa I L Usarufa +ush I L Ushojo +usi I L Usui +usk I L Usaghade +usp I L Uspanteco +usu I L Uya +uta I L Otank +ute I L Ute-Southern Paiute +utp I L Amba (Solomon Islands) +utr I L Etulo +utu I L Utu +uum I L Urum +uun I L Kulon-Pazeh +uur I L Ura (Vanuatu) +uuu I L U +uve I L West Uvean +uvh I L Uri +uvl I L Lote +uwa I L Kuku-Uwanh +uya I L Doko-Uyanga +uzb uzb uzb uz M L Uzbek +uzn I L Northern Uzbek +uzs I L Southern Uzbek +vaa I L Vaagri Booli +vae I L Vale +vaf I L Vafsi +vag I L Vagla +vah I L Varhadi-Nagpuri +vai vai vai I L Vai +vaj I L Vasekela Bushman +val I L Vehes +vam I L Vanimo +van I L Valman +vao I L Vao +vap I L Vaiphei +var I L Huarijio +vas I L Vasavi +vau I L Vanuma +vav I L Varli +vay I L Wayu +vbb I L Southeast Babar +vbk I L Southwestern Bontok +vec I L Venetian +ved I L Veddah +vel I L Veluws +vem I L Vemgo-Mabas +ven ven ven ve I L Venda +veo I E Ventureño +vep I L Veps +ver I L Mom Jango +vgr I L Vaghri +vgt I L Vlaamse Gebarentaal +vic I L Virgin Islands Creole English +vid I L Vidunda +vie vie vie vi I L Vietnamese +vif I L Vili +vig I L 
Viemo +vil I L Vilela +vin I L Vinza +vis I L Vishavan +vit I L Viti +viv I L Iduna +vka I E Kariyarra +vki I L Ija-Zuba +vkj I L Kujarge +vkk I L Kaur +vkl I L Kulisusu +vkm I E Kamakan +vko I L Kodeoha +vkp I L Korlai Creole Portuguese +vkt I L Tenggarong Kutai Malay +vku I L Kurrama +vlp I L Valpei +vls I L Vlaams +vma I L Martuyhunira +vmb I E Barbaram +vmc I L Juxtlahuaca Mixtec +vmd I L Mudu Koraga +vme I L East Masela +vmf I L Mainfränkisch +vmg I L Lungalunga +vmh I L Maraghei +vmi I E Miwa +vmj I L Ixtayutla Mixtec +vmk I L Makhuwa-Shirima +vml I E Malgana +vmm I L Mitlatongo Mixtec +vmp I L Soyaltepec Mazatec +vmq I L Soyaltepec Mixtec +vmr I L Marenje +vms I E Moksela +vmu I E Muluridyi +vmv I E Valley Maidu +vmw I L Makhuwa +vmx I L Tamazola Mixtec +vmy I L Ayautla Mazatec +vmz I L Mazatlán Mazatec +vnk I L Vano +vnm I L Vinmavis +vnp I L Vunapu +vol vol vol vo I C Volapük +vor I L Voro +vot vot vot I L Votic +vra I L Vera'a +vro I L Võro +vrs I L Varisi +vrt I L Burmbar +vsi I L Moldova Sign Language +vsl I L Venezuelan Sign Language +vsv I L Valencian Sign Language +vto I L Vitou +vum I L Vumbu +vun I L Vunjo +vut I L Vute +vwa I L Awa (China) +waa I L Walla Walla +wab I L Wab +wac I L Wasco-Wishram +wad I L Wandamen +wae I L Walser +waf I E Wakoná +wag I L Wa'ema +wah I L Watubela +wai I L Wares +waj I L Waffa +wal wal wal I L Wolaytta +wam I E Wampanoag +wan I L Wan +wao I E Wappo +wap I L Wapishana +waq I L Wageman +war war war I L Waray (Philippines) +was was was I L Washo +wat I L Kaninuwa +wau I L Waurá +wav I L Waka +waw I L Waiwai +wax I L Watam +way I L Wayana +waz I L Wampur +wba I L Warao +wbb I L Wabo +wbe I L Waritai +wbf I L Wara +wbh I L Wanda +wbi I L Vwanji +wbj I L Alagwa +wbk I L Waigali +wbl I L Wakhi +wbm I L Wa +wbp I L Warlpiri +wbq I L Waddar +wbr I L Wagdi +wbt I L Wanman +wbv I L Wajarri +wbw I L Woi +wca I L Yanomámi +wci I L Waci Gbe +wdd I L Wandji +wdg I L Wadaginam +wdj I L Wadjiginy +wdk I E Wadikali +wdu I E Wadjigu 
+wdy I E Wadjabangayi +wea I E Wewaw +wec I L Wè Western +wed I L Wedau +weg I L Wergaia +weh I L Weh +wei I L Kiunum +wem I L Weme Gbe +weo I L Wemale +wep I L Westphalien +wer I L Weri +wes I L Cameroon Pidgin +wet I L Perai +weu I L Rawngtu Chin +wew I L Wejewa +wfg I L Yafi +wga I E Wagaya +wgb I L Wagawaga +wgg I E Wangganguru +wgi I L Wahgi +wgo I L Waigeo +wgu I E Wirangu +wgy I L Warrgamay +wha I L Manusela +whg I L North Wahgi +whk I L Wahau Kenyah +whu I L Wahau Kayan +wib I L Southern Toussian +wic I L Wichita +wie I E Wik-Epa +wif I E Wik-Keyangan +wig I L Wik-Ngathana +wih I L Wik-Me'anha +wii I L Minidien +wij I L Wik-Iiyanh +wik I L Wikalkan +wil I E Wilawila +wim I L Wik-Mungkan +win I L Ho-Chunk +wir I E Wiraféd +wiu I L Wiru +wiv I L Vitu +wiy I E Wiyot +wja I L Waja +wji I L Warji +wka I E Kw'adza +wkb I L Kumbaran +wkd I L Wakde +wkl I L Kalanadi +wku I L Kunduvadi +wkw I E Wakawaka +wky I E Wangkayutyuru +wla I L Walio +wlc I L Mwali Comorian +wle I L Wolane +wlg I L Kunbarlang +wli I L Waioli +wlk I E Wailaki +wll I L Wali (Sudan) +wlm I H Middle Welsh +wln wln wln wa I L Walloon +wlo I L Wolio +wlr I L Wailapa +wls I L Wallisian +wlu I E Wuliwuli +wlv I L Wichí Lhamtés Vejoz +wlw I L Walak +wlx I L Wali (Ghana) +wly I E Waling +wma I E Mawa (Nigeria) +wmb I L Wambaya +wmc I L Wamas +wmd I L Mamaindé +wme I L Wambule +wmh I L Waima'a +wmi I E Wamin +wmm I L Maiwa (Indonesia) +wmn I E Waamwang +wmo I L Wom (Papua New Guinea) +wms I L Wambon +wmt I L Walmajarri +wmw I L Mwani +wmx I L Womo +wnb I L Wanambre +wnc I L Wantoat +wnd I E Wandarang +wne I L Waneci +wng I L Wanggom +wni I L Ndzwani Comorian +wnk I L Wanukaka +wnm I E Wanggamala +wnn I E Wunumara +wno I L Wano +wnp I L Wanap +wnu I L Usan +wnw I L Wintu +wny I L Wanyi +woa I L Tyaraity +wob I L Wè Northern +woc I L Wogeo +wod I L Wolani +woe I L Woleaian +wof I L Gambian Wolof +wog I L Wogamusin +woi I L Kamang +wok I L Longto +wol wol wol wo I L Wolof +wom I L Wom (Nigeria) +won I L 
Wongo +woo I L Manombai +wor I L Woria +wos I L Hanga Hundi +wow I L Wawonii +woy I E Weyto +wpc I L Maco +wra I L Warapu +wrb I E Warluwara +wrd I L Warduji +wrg I E Warungu +wrh I E Wiradhuri +wri I E Wariyangga +wrk I L Garrwa +wrl I L Warlmanpa +wrm I L Warumungu +wrn I L Warnang +wro I E Worrorra +wrp I L Waropen +wrr I L Wardaman +wrs I L Waris +wru I L Waru +wrv I L Waruna +wrw I E Gugu Warra +wrx I L Wae Rana +wry I L Merwari +wrz I E Waray (Australia) +wsa I L Warembori +wsi I L Wusi +wsk I L Waskia +wsr I L Owenia +wss I L Wasa +wsu I E Wasu +wsv I E Wotapuri-Katarqalai +wtf I L Watiwa +wth I E Wathawurrung +wti I L Berta +wtk I L Watakataui +wtm I L Mewati +wtw I L Wotu +wua I L Wikngenchera +wub I L Wunambal +wud I L Wudu +wuh I L Wutunhua +wul I L Silimo +wum I L Wumbvu +wun I L Bungu +wur I E Wurrugu +wut I L Wutung +wuu I L Wu Chinese +wuv I L Wuvulu-Aua +wux I L Wulna +wuy I L Wauyai +wwa I L Waama +wwb I E Wakabunga +wwo I L Wetamut +wwr I E Warrwa +www I L Wawa +wxa I L Waxianghua +wxw I E Wardandi +wya I L Wyandot +wyb I L Wangaaybuwan-Ngiyambaa +wyi I E Woiwurrung +wym I L Wymysorys +wyr I L Wayoró +wyy I L Western Fijian +xaa I H Andalusian Arabic +xab I L Sambe +xac I L Kachari +xad I E Adai +xae I A Aequian +xag I E Aghwan +xai I E Kaimbé +xal xal xal I L Kalmyk +xam I E /Xam +xan I L Xamtanga +xao I L Khao +xap I E Apalachee +xaq I A Aquitanian +xar I E Karami +xas I E Kamas +xat I L Katawixi +xau I L Kauwera +xav I L Xavánte +xaw I L Kawaiisu +xay I L Kayan Mahakam +xba I E Kamba (Brazil) +xbb I E Lower Burdekin +xbc I A Bactrian +xbd I E Bindal +xbe I E Bigambal +xbg I E Bunganditj +xbi I L Kombio +xbj I E Birrpayi +xbm I H Middle Breton +xbn I E Kenaboi +xbo I E Bolgarian +xbp I E Bibbulman +xbr I L Kambera +xbw I E Kambiwá +xbx I E Kabixí +xby I L Batyala +xcb I E Cumbric +xcc I A Camunic +xce I A Celtiberian +xcg I A Cisalpine Gaulish +xch I E Chemakum +xcl I H Classical Armenian +xcm I E Comecrudo +xcn I E Cotoname +xco I A Chorasmian 
+xcr I A Carian +xct I H Classical Tibetan +xcu I E Curonian +xcv I E Chuvantsy +xcw I E Coahuilteco +xcy I E Cayuse +xda I L Darkinyung +xdc I A Dacian +xdk I E Dharuk +xdm I A Edomite +xdy I L Malayic Dayak +xeb I A Eblan +xed I L Hdi +xeg I E //Xegwi +xel I L Kelo +xem I L Kembayan +xep I A Epi-Olmec +xer I L Xerénte +xes I L Kesawai +xet I L Xetá +xeu I L Keoru-Ahia +xfa I A Faliscan +xga I A Galatian +xgb I E Gbin +xgd I E Gudang +xgf I E Gabrielino-Fernandeño +xgg I E Goreng +xgi I E Garingbal +xgl I E Galindan +xgm I E Guwinmal +xgr I E Garza +xgu I L Unggumi +xgw I E Guwa +xha I A Harami +xhc I E Hunnic +xhd I A Hadrami +xhe I L Khetrani +xho xho xho xh I L Xhosa +xhr I A Hernican +xht I A Hattic +xhu I A Hurrian +xhv I L Khua +xib I A Iberian +xii I L Xiri +xil I A Illyrian +xin I E Xinca +xip I E Xipináwa +xir I E Xiriâna +xiv I A Indus Valley Language +xiy I L Xipaya +xjb I E Minjungbal +xjt I E Jaitmatang +xka I L Kalkoti +xkb I L Northern Nago +xkc I L Kho'ini +xkd I L Mendalam Kayan +xke I L Kereho +xkf I L Khengkha +xkg I L Kagoro +xkh I L Karahawyana +xki I L Kenyan Sign Language +xkj I L Kajali +xkk I L Kaco' +xkl I L Mainstream Kenyah +xkn I L Kayan River Kayan +xko I L Kiorr +xkp I L Kabatei +xkq I L Koroni +xkr I E Xakriabá +xks I L Kumbewaha +xkt I L Kantosi +xku I L Kaamba +xkv I L Kgalagadi +xkw I L Kembra +xkx I L Karore +xky I L Uma' Lasan +xkz I L Kurtokha +xla I L Kamula +xlb I E Loup B +xlc I A Lycian +xld I A Lydian +xle I A Lemnian +xlg I A Ligurian (Ancient) +xli I A Liburnian +xln I A Alanic +xlo I E Loup A +xlp I A Lepontic +xls I A Lusitanian +xlu I A Cuneiform Luwian +xly I A Elymian +xma I L Mushungulu +xmb I L Mbonga +xmc I L Makhuwa-Marrevone +xmd I L Mbudum +xme I A Median +xmf I L Mingrelian +xmg I L Mengaka +xmh I L Kuku-Muminh +xmj I L Majera +xmk I A Ancient Macedonian +xml I L Malaysian Sign Language +xmm I L Manado Malay +xmn I H Manichaean Middle Persian +xmo I L Morerebi +xmp I E Kuku-Mu'inh +xmq I E Kuku-Mangk +xmr I 
A Meroitic +xms I L Moroccan Sign Language +xmt I L Matbat +xmu I E Kamu +xmv I L Antankarana Malagasy +xmw I L Tsimihety Malagasy +xmx I L Maden +xmy I L Mayaguduna +xmz I L Mori Bawah +xna I A Ancient North Arabian +xnb I L Kanakanabu +xng I H Middle Mongolian +xnh I L Kuanhua +xni I E Ngarigu +xnk I E Nganakarti +xnn I L Northern Kankanay +xno I H Anglo-Norman +xnr I L Kangri +xns I L Kanashi +xnt I E Narragansett +xnu I E Nukunul +xny I L Nyiyaparli +xnz I L Kenzi +xoc I E O'chi'chi' +xod I L Kokoda +xog I L Soga +xoi I L Kominimung +xok I L Xokleng +xom I L Komo (Sudan) +xon I L Konkomba +xoo I E Xukurú +xop I L Kopar +xor I L Korubo +xow I L Kowaki +xpa I E Pirriya +xpc I E Pecheneg +xpe I L Liberia Kpelle +xpg I A Phrygian +xpi I E Pictish +xpj I E Mpalitjanh +xpk I L Kulina Pano +xpm I E Pumpokol +xpn I E Kapinawá +xpo I E Pochutec +xpp I E Puyo-Paekche +xpq I E Mohegan-Pequot +xpr I A Parthian +xps I E Pisidian +xpt I E Punthamara +xpu I A Punic +xpy I E Puyo +xqa I H Karakhanid +xqt I A Qatabanian +xra I L Krahô +xrb I L Eastern Karaboro +xrd I E Gundungurra +xre I L Kreye +xrg I E Minang +xri I L Krikati-Timbira +xrm I E Armazic +xrn I E Arin +xrq I E Karranga +xrr I A Raetic +xrt I E Aranama-Tamique +xru I L Marriammu +xrw I L Karawa +xsa I A Sabaean +xsb I L Sambal +xsc I A Scythian +xsd I A Sidetic +xse I L Sempan +xsh I L Shamang +xsi I L Sio +xsj I L Subi +xsl I L South Slavey +xsm I L Kasem +xsn I L Sanga (Nigeria) +xso I E Solano +xsp I L Silopi +xsq I L Makhuwa-Saka +xsr I L Sherpa +xss I E Assan +xsu I L Sanumá +xsv I E Sudovian +xsy I L Saisiyat +xta I L Alcozauca Mixtec +xtb I L Chazumba Mixtec +xtc I L Katcha-Kadugli-Miri +xtd I L Diuxi-Tilantongo Mixtec +xte I L Ketengban +xtg I A Transalpine Gaulish +xth I E Yitha Yitha +xti I L Sinicahua Mixtec +xtj I L San Juan Teita Mixtec +xtl I L Tijaltepec Mixtec +xtm I L Magdalena Peñasco Mixtec +xtn I L Northern Tlaxiaco Mixtec +xto I A Tokharian A +xtp I L San Miguel Piedras Mixtec +xtq I H 
Tumshuqese +xtr I A Early Tripuri +xts I L Sindihui Mixtec +xtt I L Tacahua Mixtec +xtu I L Cuyamecalco Mixtec +xtv I E Thawa +xtw I L Tawandê +xty I L Yoloxochitl Mixtec +xtz I E Tasmanian +xua I L Alu Kurumba +xub I L Betta Kurumba +xud I E Umiida +xug I L Kunigami +xuj I L Jennu Kurumba +xul I E Ngunawal +xum I A Umbrian +xun I E Unggaranggu +xuo I L Kuo +xup I E Upper Umpqua +xur I A Urartian +xut I E Kuthant +xuu I L Kxoe +xve I A Venetic +xvi I L Kamviri +xvn I A Vandalic +xvo I A Volscian +xvs I A Vestinian +xwa I L Kwaza +xwc I E Woccon +xwd I E Wadi Wadi +xwe I L Xwela Gbe +xwg I L Kwegu +xwj I E Wajuk +xwk I E Wangkumara +xwl I L Western Xwla Gbe +xwo I E Written Oirat +xwr I L Kwerba Mamberamo +xwt I E Wotjobaluk +xww I E Wemba Wemba +xxb I E Boro (Ghana) +xxk I L Ke'o +xxm I E Minkin +xxr I E Koropó +xxt I E Tambora +xya I E Yaygir +xyb I E Yandjibara +xyj I E Mayi-Yapi +xyk I E Mayi-Kulan +xyl I E Yalakalore +xyt I E Mayi-Thakurti +xyy I L Yorta Yorta +xzh I A Zhang-Zhung +xzm I E Zemgalian +xzp I H Ancient Zapotec +yaa I L Yaminahua +yab I L Yuhup +yac I L Pass Valley Yali +yad I L Yagua +yae I L Pumé +yaf I L Yaka (Democratic Republic of Congo) +yag I L Yámana +yah I L Yazgulyam +yai I L Yagnobi +yaj I L Banda-Yangere +yak I L Yakama +yal I L Yalunka +yam I L Yamba +yan I L Mayangna +yao yao yao I L Yao +yap yap yap I L Yapese +yaq I L Yaqui +yar I L Yabarana +yas I L Nugunu (Cameroon) +yat I L Yambeta +yau I L Yuwana +yav I L Yangben +yaw I L Yawalapití +yax I L Yauma +yay I L Agwagwune +yaz I L Lokaa +yba I L Yala +ybb I L Yemba +ybe I L West Yugur +ybh I L Yakha +ybi I L Yamphu +ybj I L Hasha +ybk I L Bokha +ybl I L Yukuben +ybm I L Yaben +ybn I E Yabaâna +ybo I L Yabong +ybx I L Yawiyo +yby I L Yaweyuha +ych I L Chesu +ycl I L Lolopo +ycn I L Yucuna +ycp I L Chepya +yda I E Yanda +ydd I L Eastern Yiddish +yde I L Yangum Dey +ydg I L Yidgha +ydk I L Yoidik +yds I L Yiddish Sign Language +yea I L Ravula +yec I L Yeniche +yee I L Yimas +yei I E Yeni 
+yej I L Yevanic +yel I L Yela +yer I L Tarok +yes I L Nyankpa +yet I L Yetfa +yeu I L Yerukula +yev I L Yapunda +yey I L Yeyi +yga I E Malyangapa +ygi I E Yiningayi +ygl I L Yangum Gel +ygm I L Yagomi +ygp I L Gepo +ygr I L Yagaria +ygu I L Yugul +ygw I L Yagwoia +yha I L Baha Buyang +yhd I L Judeo-Iraqi Arabic +yhl I L Hlepho Phowa +yia I L Yinggarda +yid yid yid yi M L Yiddish +yif I L Ache +yig I L Wusa Nasu +yih I L Western Yiddish +yii I L Yidiny +yij I L Yindjibarndi +yik I L Dongshanba Lalo +yil I E Yindjilandji +yim I L Yimchungru Naga +yin I L Yinchia +yip I L Pholo +yiq I L Miqie +yir I L North Awyu +yis I L Yis +yit I L Eastern Lalu +yiu I L Awu +yiv I L Northern Nisu +yix I L Axi Yi +yiz I L Azhe +yka I L Yakan +ykg I L Northern Yukaghir +yki I L Yoke +ykk I L Yakaikeke +ykl I L Khlula +ykm I L Kap +ykn I L Kua-nsi +yko I L Yasa +ykr I L Yekora +ykt I L Kathu +yku I L Kuamasi +yky I L Yakoma +yla I L Yaul +ylb I L Yaleba +yle I L Yele +ylg I L Yelogu +yli I L Angguruk Yali +yll I L Yil +ylm I L Limi +yln I L Langnian Buyang +ylo I L Naluo Yi +ylr I E Yalarnnga +ylu I L Aribwaung +yly I L Nyâlayu +ymb I L Yambes +ymc I L Southern Muji +ymd I L Muda +yme I E Yameo +ymg I L Yamongeri +ymh I L Mili +ymi I L Moji +ymk I L Makwe +yml I L Iamalele +ymm I L Maay +ymn I L Yamna +ymo I L Yangum Mon +ymp I L Yamap +ymq I L Qila Muji +ymr I L Malasar +yms I A Mysian +ymt I E Mator-Taygi-Karagas +ymx I L Northern Muji +ymz I L Muzi +yna I L Aluo +ynd I E Yandruwandha +yne I L Lang'e +yng I L Yango +ynh I L Yangho +ynk I L Naukan Yupik +ynl I L Yangulam +ynn I E Yana +yno I L Yong +ynq I L Yendang +yns I L Yansi +ynu I E Yahuna +yob I E Yoba +yog I L Yogad +yoi I L Yonaguni +yok I L Yokuts +yol I E Yola +yom I L Yombe +yon I L Yongkom +yor yor yor yo I L Yoruba +yot I L Yotti +yox I L Yoron +yoy I L Yoy +ypa I L Phala +ypb I L Labo Phowa +ypg I L Phola +yph I L Phupha +ypm I L Phuma +ypn I L Ani Phowa +ypo I L Alo Phola +ypp I L Phupa +ypz I L Phuza +yra I L Yerakai 
+yrb I L Yareba +yre I L Yaouré +yri I L Yarí +yrk I L Nenets +yrl I L Nhengatu +yrm I L Yirrk-Mel +yrn I L Yerong +yrs I L Yarsun +yrw I L Yarawata +yry I L Yarluyandi +ysc I E Yassic +ysd I L Samatao +ysg I L Sonaga +ysl I L Yugoslavian Sign Language +ysn I L Sani +yso I L Nisi (China) +ysp I L Southern Lolopo +ysr I E Sirenik Yupik +yss I L Yessan-Mayo +ysy I L Sanie +yta I L Talu +ytl I L Tanglang +ytp I L Thopho +ytw I L Yout Wam +yty I E Yatay +yua I L Yucateco +yub I E Yugambal +yuc I L Yuchi +yud I L Judeo-Tripolitanian Arabic +yue I L Yue Chinese +yuf I L Havasupai-Walapai-Yavapai +yug I E Yug +yui I L Yurutí +yuj I L Karkar-Yuri +yuk I E Yuki +yul I L Yulu +yum I L Quechan +yun I L Bena (Nigeria) +yup I L Yukpa +yuq I L Yuqui +yur I L Yurok +yut I L Yopno +yuu I L Yugh +yuw I L Yau (Morobe Province) +yux I L Southern Yukaghir +yuy I L East Yugur +yuz I L Yuracare +yva I L Yawa +yvt I E Yavitero +ywa I L Kalou +ywg I L Yinhawangka +ywl I L Western Lalu +ywn I L Yawanawa +ywq I L Wuding-Luquan Yi +ywr I L Yawuru +ywt I L Xishanba Lalo +ywu I L Wumeng Nasu +yww I E Yawarawarga +yxa I E Mayawali +yxg I E Yagara +yxl I E Yardliyawarra +yxm I E Yinwum +yxu I E Yuyu +yxy I E Yabula Yabula +yyr I E Yir Yoront +yyu I L Yau (Sandaun Province) +yyz I L Ayizi +yzg I L E'ma Buyang +yzk I L Zokhuo +zaa I L Sierra de Juárez Zapotec +zab I L San Juan Guelavía Zapotec +zac I L Ocotlán Zapotec +zad I L Cajonos Zapotec +zae I L Yareni Zapotec +zaf I L Ayoquesco Zapotec +zag I L Zaghawa +zah I L Zangwal +zai I L Isthmus Zapotec +zaj I L Zaramo +zak I L Zanaki +zal I L Zauzou +zam I L Miahuatlán Zapotec +zao I L Ozolotepec Zapotec +zap zap zap M L Zapotec +zaq I L Aloápam Zapotec +zar I L Rincón Zapotec +zas I L Santo Domingo Albarradas Zapotec +zat I L Tabaa Zapotec +zau I L Zangskari +zav I L Yatzachi Zapotec +zaw I L Mitla Zapotec +zax I L Xadani Zapotec +zay I L Zayse-Zergulla +zaz I L Zari +zbc I L Central Berawan +zbe I L East Berawan +zbl zbl zbl I C Blissymbols +zbt I 
L Batui +zbw I L West Berawan +zca I L Coatecas Altas Zapotec +zch I L Central Hongshuihe Zhuang +zdj I L Ngazidja Comorian +zea I L Zeeuws +zeg I L Zenag +zeh I L Eastern Hongshuihe Zhuang +zen zen zen I L Zenaga +zga I L Kinga +zgb I L Guibei Zhuang +zgh I L Standard Moroccan Tamazight +zgm I L Minz Zhuang +zgn I L Guibian Zhuang +zgr I L Magori +zha zha zha za M L Zhuang +zhb I L Zhaba +zhd I L Dai Zhuang +zhi I L Zhire +zhn I L Nong Zhuang +zho chi zho zh M L Chinese +zhw I L Zhoa +zia I L Zia +zib I L Zimbabwe Sign Language +zik I L Zimakani +zil I L Zialo +zim I L Mesme +zin I L Zinza +zir I E Ziriya +ziw I L Zigula +ziz I L Zizilivakan +zka I L Kaimbulawa +zkb I E Koibal +zkd I L Kadu +zkg I E Koguryo +zkh I E Khorezmian +zkk I E Karankawa +zkn I L Kanan +zko I E Kott +zkp I E São Paulo Kaingáng +zkr I L Zakhring +zkt I E Kitan +zku I E Kaurna +zkv I E Krevinian +zkz I E Khazar +zlj I L Liujiang Zhuang +zlm I L Malay (individual language) +zln I L Lianshan Zhuang +zlq I L Liuqian Zhuang +zma I L Manda (Australia) +zmb I L Zimba +zmc I E Margany +zmd I L Maridan +zme I E Mangerr +zmf I L Mfinu +zmg I L Marti Ke +zmh I E Makolkol +zmi I L Negeri Sembilan Malay +zmj I L Maridjabin +zmk I E Mandandanyi +zml I L Madngele +zmm I L Marimanindji +zmn I L Mbangwe +zmo I L Molo +zmp I L Mpuono +zmq I L Mituku +zmr I L Maranunggu +zms I L Mbesa +zmt I L Maringarr +zmu I E Muruwari +zmv I E Mbariman-Gudhinma +zmw I L Mbo (Democratic Republic of Congo) +zmx I L Bomitaba +zmy I L Mariyedi +zmz I L Mbandja +zna I L Zan Gula +zne I L Zande (individual language) +zng I L Mang +znk I E Manangkari +zns I L Mangas +zoc I L Copainalá Zoque +zoh I L Chimalapa Zoque +zom I L Zou +zoo I L Asunción Mixtepec Zapotec +zoq I L Tabasco Zoque +zor I L Rayón Zoque +zos I L Francisco León Zoque +zpa I L Lachiguiri Zapotec +zpb I L Yautepec Zapotec +zpc I L Choapan Zapotec +zpd I L Southeastern Ixtlán Zapotec +zpe I L Petapa Zapotec +zpf I L San Pedro Quiatoni Zapotec +zpg I L Guevea De 
Humboldt Zapotec +zph I L Totomachapan Zapotec +zpi I L Santa María Quiegolani Zapotec +zpj I L Quiavicuzas Zapotec +zpk I L Tlacolulita Zapotec +zpl I L Lachixío Zapotec +zpm I L Mixtepec Zapotec +zpn I L Santa Inés Yatzechi Zapotec +zpo I L Amatlán Zapotec +zpp I L El Alto Zapotec +zpq I L Zoogocho Zapotec +zpr I L Santiago Xanica Zapotec +zps I L Coatlán Zapotec +zpt I L San Vicente Coatlán Zapotec +zpu I L Yalálag Zapotec +zpv I L Chichicapan Zapotec +zpw I L Zaniza Zapotec +zpx I L San Baltazar Loxicha Zapotec +zpy I L Mazaltepec Zapotec +zpz I L Texmelucan Zapotec +zqe I L Qiubei Zhuang +zra I E Kara (Korea) +zrg I L Mirgan +zrn I L Zerenkel +zro I L Záparo +zrp I E Zarphatic +zrs I L Mairasi +zsa I L Sarasira +zsk I A Kaskean +zsl I L Zambian Sign Language +zsm I L Standard Malay +zsr I L Southern Rincon Zapotec +zsu I L Sukurum +zte I L Elotepec Zapotec +ztg I L Xanaguía Zapotec +ztl I L Lapaguía-Guivini Zapotec +ztm I L San Agustín Mixtepec Zapotec +ztn I L Santa Catarina Albarradas Zapotec +ztp I L Loxicha Zapotec +ztq I L Quioquitani-Quierí Zapotec +zts I L Tilquiapan Zapotec +ztt I L Tejalapan Zapotec +ztu I L Güilá Zapotec +ztx I L Zaachila Zapotec +zty I L Yatee Zapotec +zua I L Zeem +zuh I L Tokano +zul zul zul zu I L Zulu +zum I L Kumzari +zun zun zun I L Zuni +zuy I L Zumaya +zwa I L Zay +zxx zxx zxx S S No linguistic content +zyb I L Yongbei Zhuang +zyg I L Yang Zhuang +zyj I L Youjiang Zhuang +zyn I L Yongnan Zhuang +zyp I L Zyphe Chin +zza zza zza M L Zaza +zzj I L Zuojiang Zhuang \ No newline at end of file diff --git a/lib/babelfish/data/iso15924-utf8-20131012.txt b/lib/babelfish/data/iso15924-utf8-20131012.txt new file mode 100755 index 00000000..4b6ff471 --- /dev/null +++ b/lib/babelfish/data/iso15924-utf8-20131012.txt @@ -0,0 +1,176 @@ +# +# ISO 15924 - Codes for the representation of names of scripts +# Codes pour la représentation des noms d’écritures +# Format: +# Code;N°;English Name;Nom français;PVA;Date +# + 
+Afak;439;Afaka;afaka;;2010-12-21 +Aghb;239;Caucasian Albanian;aghbanien;;2012-10-16 +Ahom;338;Ahom, Tai Ahom;âhom;;2012-11-01 +Arab;160;Arabic;arabe;Arabic;2004-05-01 +Armi;124;Imperial Aramaic;araméen impérial;Imperial_Aramaic;2009-06-01 +Armn;230;Armenian;arménien;Armenian;2004-05-01 +Avst;134;Avestan;avestique;Avestan;2009-06-01 +Bali;360;Balinese;balinais;Balinese;2006-10-10 +Bamu;435;Bamum;bamoum;Bamum;2009-06-01 +Bass;259;Bassa Vah;bassa;;2010-03-26 +Batk;365;Batak;batik;Batak;2010-07-23 +Beng;325;Bengali;bengalî;Bengali;2004-05-01 +Blis;550;Blissymbols;symboles Bliss;;2004-05-01 +Bopo;285;Bopomofo;bopomofo;Bopomofo;2004-05-01 +Brah;300;Brahmi;brahma;Brahmi;2010-07-23 +Brai;570;Braille;braille;Braille;2004-05-01 +Bugi;367;Buginese;bouguis;Buginese;2006-06-21 +Buhd;372;Buhid;bouhide;Buhid;2004-05-01 +Cakm;349;Chakma;chakma;Chakma;2012-02-06 +Cans;440;Unified Canadian Aboriginal Syllabics;syllabaire autochtone canadien unifié;Canadian_Aboriginal;2004-05-29 +Cari;201;Carian;carien;Carian;2007-07-02 +Cham;358;Cham;cham (čam, tcham);Cham;2009-11-11 +Cher;445;Cherokee;tchérokî;Cherokee;2004-05-01 +Cirt;291;Cirth;cirth;;2004-05-01 +Copt;204;Coptic;copte;Coptic;2006-06-21 +Cprt;403;Cypriot;syllabaire chypriote;Cypriot;2004-05-01 +Cyrl;220;Cyrillic;cyrillique;Cyrillic;2004-05-01 +Cyrs;221;Cyrillic (Old Church Slavonic variant);cyrillique (variante slavonne);;2004-05-01 +Deva;315;Devanagari (Nagari);dévanâgarî;Devanagari;2004-05-01 +Dsrt;250;Deseret (Mormon);déseret (mormon);Deseret;2004-05-01 +Dupl;755;Duployan shorthand, Duployan stenography;sténographie Duployé;;2010-07-18 +Egyd;070;Egyptian demotic;démotique égyptien;;2004-05-01 +Egyh;060;Egyptian hieratic;hiératique égyptien;;2004-05-01 +Egyp;050;Egyptian hieroglyphs;hiéroglyphes égyptiens;Egyptian_Hieroglyphs;2009-06-01 +Elba;226;Elbasan;elbasan;;2010-07-18 +Ethi;430;Ethiopic (Geʻez);éthiopien (geʻez, guèze);Ethiopic;2004-10-25 +Geor;240;Georgian (Mkhedruli);géorgien (mkhédrouli);Georgian;2004-05-29 
+Geok;241;Khutsuri (Asomtavruli and Nuskhuri);khoutsouri (assomtavrouli et nouskhouri);Georgian;2012-10-16 +Glag;225;Glagolitic;glagolitique;Glagolitic;2006-06-21 +Goth;206;Gothic;gotique;Gothic;2004-05-01 +Gran;343;Grantha;grantha;;2009-11-11 +Grek;200;Greek;grec;Greek;2004-05-01 +Gujr;320;Gujarati;goudjarâtî (gujrâtî);Gujarati;2004-05-01 +Guru;310;Gurmukhi;gourmoukhî;Gurmukhi;2004-05-01 +Hang;286;Hangul (Hangŭl, Hangeul);hangûl (hangŭl, hangeul);Hangul;2004-05-29 +Hani;500;Han (Hanzi, Kanji, Hanja);idéogrammes han (sinogrammes);Han;2009-02-23 +Hano;371;Hanunoo (Hanunóo);hanounóo;Hanunoo;2004-05-29 +Hans;501;Han (Simplified variant);idéogrammes han (variante simplifiée);;2004-05-29 +Hant;502;Han (Traditional variant);idéogrammes han (variante traditionnelle);;2004-05-29 +Hatr;127;Hatran;hatrénien;;2012-11-01 +Hebr;125;Hebrew;hébreu;Hebrew;2004-05-01 +Hira;410;Hiragana;hiragana;Hiragana;2004-05-01 +Hluw;080;Anatolian Hieroglyphs (Luwian Hieroglyphs, Hittite Hieroglyphs);hiéroglyphes anatoliens (hiéroglyphes louvites, hiéroglyphes hittites);;2011-12-09 +Hmng;450;Pahawh Hmong;pahawh hmong;;2004-05-01 +Hrkt;412;Japanese syllabaries (alias for Hiragana + Katakana);syllabaires japonais (alias pour hiragana + katakana);Katakana_Or_Hiragana;2011-06-21 +Hung;176;Old Hungarian (Hungarian Runic);runes hongroises (ancien hongrois);;2012-10-16 +Inds;610;Indus (Harappan);indus;;2004-05-01 +Ital;210;Old Italic (Etruscan, Oscan, etc.);ancien italique (étrusque, osque, etc.);Old_Italic;2004-05-29 +Java;361;Javanese;javanais;Javanese;2009-06-01 +Jpan;413;Japanese (alias for Han + Hiragana + Katakana);japonais (alias pour han + hiragana + katakana);;2006-06-21 +Jurc;510;Jurchen;jurchen;;2010-12-21 +Kali;357;Kayah Li;kayah li;Kayah_Li;2007-07-02 +Kana;411;Katakana;katakana;Katakana;2004-05-01 +Khar;305;Kharoshthi;kharochthî;Kharoshthi;2006-06-21 +Khmr;355;Khmer;khmer;Khmer;2004-05-29 +Khoj;322;Khojki;khojkî;;2011-06-21 +Knda;345;Kannada;kannara (canara);Kannada;2004-05-29 
+Kore;287;Korean (alias for Hangul + Han);coréen (alias pour hangûl + han);;2007-06-13 +Kpel;436;Kpelle;kpèllé;;2010-03-26 +Kthi;317;Kaithi;kaithî;Kaithi;2009-06-01 +Lana;351;Tai Tham (Lanna);taï tham (lanna);Tai_Tham;2009-06-01 +Laoo;356;Lao;laotien;Lao;2004-05-01 +Latf;217;Latin (Fraktur variant);latin (variante brisée);;2004-05-01 +Latg;216;Latin (Gaelic variant);latin (variante gaélique);;2004-05-01 +Latn;215;Latin;latin;Latin;2004-05-01 +Lepc;335;Lepcha (Róng);lepcha (róng);Lepcha;2007-07-02 +Limb;336;Limbu;limbou;Limbu;2004-05-29 +Lina;400;Linear A;linéaire A;;2004-05-01 +Linb;401;Linear B;linéaire B;Linear_B;2004-05-29 +Lisu;399;Lisu (Fraser);lisu (Fraser);Lisu;2009-06-01 +Loma;437;Loma;loma;;2010-03-26 +Lyci;202;Lycian;lycien;Lycian;2007-07-02 +Lydi;116;Lydian;lydien;Lydian;2007-07-02 +Mahj;314;Mahajani;mahâjanî;;2012-10-16 +Mand;140;Mandaic, Mandaean;mandéen;Mandaic;2010-07-23 +Mani;139;Manichaean;manichéen;;2007-07-15 +Maya;090;Mayan hieroglyphs;hiéroglyphes mayas;;2004-05-01 +Mend;438;Mende Kikakui;mendé kikakui;;2013-10-12 +Merc;101;Meroitic Cursive;cursif méroïtique;Meroitic_Cursive;2012-02-06 +Mero;100;Meroitic Hieroglyphs;hiéroglyphes méroïtiques;Meroitic_Hieroglyphs;2012-02-06 +Mlym;347;Malayalam;malayâlam;Malayalam;2004-05-01 +Modi;323;Modi, Moḍī;modî;;2013-10-12 +Moon;218;Moon (Moon code, Moon script, Moon type);écriture Moon;;2006-12-11 +Mong;145;Mongolian;mongol;Mongolian;2004-05-01 +Mroo;199;Mro, Mru;mro;;2010-12-21 +Mtei;337;Meitei Mayek (Meithei, Meetei);meitei mayek;Meetei_Mayek;2009-06-01 +Mult;323; Multani;multanî;;2012-11-01 +Mymr;350;Myanmar (Burmese);birman;Myanmar;2004-05-01 +Narb;106;Old North Arabian (Ancient North Arabian);nord-arabique;;2010-03-26 +Nbat;159;Nabataean;nabatéen;;2010-03-26 +Nkgb;420;Nakhi Geba ('Na-'Khi ²Ggŏ-¹baw, Naxi Geba);nakhi géba;;2009-02-23 +Nkoo;165;N’Ko;n’ko;Nko;2006-10-10 +Nshu;499;Nüshu;nüshu;;2010-12-21 +Ogam;212;Ogham;ogam;Ogham;2004-05-01 +Olck;261;Ol Chiki (Ol Cemet’, Ol, Santali);ol 
tchiki;Ol_Chiki;2007-07-02 +Orkh;175;Old Turkic, Orkhon Runic;orkhon;Old_Turkic;2009-06-01 +Orya;327;Oriya;oriyâ;Oriya;2004-05-01 +Osma;260;Osmanya;osmanais;Osmanya;2004-05-01 +Palm;126;Palmyrene;palmyrénien;;2010-03-26 +Pauc;263;Pau Cin Hau;paou chin haou;;2013-10-12 +Perm;227;Old Permic;ancien permien;;2004-05-01 +Phag;331;Phags-pa;’phags pa;Phags_Pa;2006-10-10 +Phli;131;Inscriptional Pahlavi;pehlevi des inscriptions;Inscriptional_Pahlavi;2009-06-01 +Phlp;132;Psalter Pahlavi;pehlevi des psautiers;;2007-11-26 +Phlv;133;Book Pahlavi;pehlevi des livres;;2007-07-15 +Phnx;115;Phoenician;phénicien;Phoenician;2006-10-10 +Plrd;282;Miao (Pollard);miao (Pollard);Miao;2012-02-06 +Prti;130;Inscriptional Parthian;parthe des inscriptions;Inscriptional_Parthian;2009-06-01 +Qaaa;900;Reserved for private use (start);réservé à l’usage privé (début);;2004-05-29 +Qabx;949;Reserved for private use (end);réservé à l’usage privé (fin);;2004-05-29 +Rjng;363;Rejang (Redjang, Kaganga);redjang (kaganga);Rejang;2009-02-23 +Roro;620;Rongorongo;rongorongo;;2004-05-01 +Runr;211;Runic;runique;Runic;2004-05-01 +Samr;123;Samaritan;samaritain;Samaritan;2009-06-01 +Sara;292;Sarati;sarati;;2004-05-29 +Sarb;105;Old South Arabian;sud-arabique, himyarite;Old_South_Arabian;2009-06-01 +Saur;344;Saurashtra;saurachtra;Saurashtra;2007-07-02 +Sgnw;095;SignWriting;SignÉcriture, SignWriting;;2006-10-10 +Shaw;281;Shavian (Shaw);shavien (Shaw);Shavian;2004-05-01 +Shrd;319;Sharada, Śāradā;charada, shard;Sharada;2012-02-06 +Sidd;302;Siddham, Siddhaṃ, Siddhamātṛkā;siddham;;2013-10-12 +Sind;318;Khudawadi, Sindhi;khoudawadî, sindhî;;2010-12-21 +Sinh;348;Sinhala;singhalais;Sinhala;2004-05-01 +Sora;398;Sora Sompeng;sora sompeng;Sora_Sompeng;2012-02-06 +Sund;362;Sundanese;sundanais;Sundanese;2007-07-02 +Sylo;316;Syloti Nagri;sylotî nâgrî;Syloti_Nagri;2006-06-21 +Syrc;135;Syriac;syriaque;Syriac;2004-05-01 +Syre;138;Syriac (Estrangelo variant);syriaque (variante estranghélo);;2004-05-01 +Syrj;137;Syriac (Western 
variant);syriaque (variante occidentale);;2004-05-01 +Syrn;136;Syriac (Eastern variant);syriaque (variante orientale);;2004-05-01 +Tagb;373;Tagbanwa;tagbanoua;Tagbanwa;2004-05-01 +Takr;321;Takri, Ṭākrī, Ṭāṅkrī;tâkrî;Takri;2012-02-06 +Tale;353;Tai Le;taï-le;Tai_Le;2004-10-25 +Talu;354;New Tai Lue;nouveau taï-lue;New_Tai_Lue;2006-06-21 +Taml;346;Tamil;tamoul;Tamil;2004-05-01 +Tang;520;Tangut;tangoute;;2010-12-21 +Tavt;359;Tai Viet;taï viêt;Tai_Viet;2009-06-01 +Telu;340;Telugu;télougou;Telugu;2004-05-01 +Teng;290;Tengwar;tengwar;;2004-05-01 +Tfng;120;Tifinagh (Berber);tifinagh (berbère);Tifinagh;2006-06-21 +Tglg;370;Tagalog (Baybayin, Alibata);tagal (baybayin, alibata);Tagalog;2009-02-23 +Thaa;170;Thaana;thâna;Thaana;2004-05-01 +Thai;352;Thai;thaï;Thai;2004-05-01 +Tibt;330;Tibetan;tibétain;Tibetan;2004-05-01 +Tirh;326;Tirhuta;tirhouta;;2011-12-09 +Ugar;040;Ugaritic;ougaritique;Ugaritic;2004-05-01 +Vaii;470;Vai;vaï;Vai;2007-07-02 +Visp;280;Visible Speech;parole visible;;2004-05-01 +Wara;262;Warang Citi (Varang Kshiti);warang citi;;2009-11-11 +Wole;480;Woleai;woléaï;;2010-12-21 +Xpeo;030;Old Persian;cunéiforme persépolitain;Old_Persian;2006-06-21 +Xsux;020;Cuneiform, Sumero-Akkadian;cunéiforme suméro-akkadien;Cuneiform;2006-10-10 +Yiii;460;Yi;yi;Yi;2004-05-01 +Zinh;994;Code for inherited script;codet pour écriture héritée;Inherited;2009-02-23 +Zmth;995;Mathematical notation;notation mathématique;;2007-11-26 +Zsym;996;Symbols;symboles;;2007-11-26 +Zxxx;997;Code for unwritten documents;codet pour les documents non écrits;;2011-06-21 +Zyyy;998;Code for undetermined script;codet pour écriture indéterminée;Common;2004-05-29 +Zzzz;999;Code for uncoded script;codet pour écriture non codée;Unknown;2006-10-10 diff --git a/lib/babelfish/data/opensubtitles_languages.txt b/lib/babelfish/data/opensubtitles_languages.txt new file mode 100755 index 00000000..1bd35063 --- /dev/null +++ b/lib/babelfish/data/opensubtitles_languages.txt @@ -0,0 +1,474 @@ +IdSubLanguage ISO639 LanguageName 
UploadEnabled WebEnabled +aar aa Afar, afar 0 0 +abk ab Abkhazian 0 0 +ace Achinese 0 0 +ach Acoli 0 0 +ada Adangme 0 0 +ady adyghé 0 0 +afa Afro-Asiatic (Other) 0 0 +afh Afrihili 0 0 +afr af Afrikaans 1 0 +ain Ainu 0 0 +aka ak Akan 0 0 +akk Akkadian 0 0 +alb sq Albanian 1 1 +ale Aleut 0 0 +alg Algonquian languages 0 0 +alt Southern Altai 0 0 +amh am Amharic 0 0 +ang English, Old (ca.450-1100) 0 0 +apa Apache languages 0 0 +ara ar Arabic 1 1 +arc Aramaic 0 0 +arg an Aragonese 0 0 +arm hy Armenian 1 0 +arn Araucanian 0 0 +arp Arapaho 0 0 +art Artificial (Other) 0 0 +arw Arawak 0 0 +asm as Assamese 0 0 +ast Asturian, Bable 0 0 +ath Athapascan languages 0 0 +aus Australian languages 0 0 +ava av Avaric 0 0 +ave ae Avestan 0 0 +awa Awadhi 0 0 +aym ay Aymara 0 0 +aze az Azerbaijani 0 0 +bad Banda 0 0 +bai Bamileke languages 0 0 +bak ba Bashkir 0 0 +bal Baluchi 0 0 +bam bm Bambara 0 0 +ban Balinese 0 0 +baq eu Basque 1 1 +bas Basa 0 0 +bat Baltic (Other) 0 0 +bej Beja 0 0 +bel be Belarusian 0 0 +bem Bemba 0 0 +ben bn Bengali 1 0 +ber Berber (Other) 0 0 +bho Bhojpuri 0 0 +bih bh Bihari 0 0 +bik Bikol 0 0 +bin Bini 0 0 +bis bi Bislama 0 0 +bla Siksika 0 0 +bnt Bantu (Other) 0 0 +bos bs Bosnian 1 0 +bra Braj 0 0 +bre br Breton 1 0 +btk Batak (Indonesia) 0 0 +bua Buriat 0 0 +bug Buginese 0 0 +bul bg Bulgarian 1 1 +bur my Burmese 1 0 +byn Blin 0 0 +cad Caddo 0 0 +cai Central American Indian (Other) 0 0 +car Carib 0 0 +cat ca Catalan 1 1 +cau Caucasian (Other) 0 0 +ceb Cebuano 0 0 +cel Celtic (Other) 0 0 +cha ch Chamorro 0 0 +chb Chibcha 0 0 +che ce Chechen 0 0 +chg Chagatai 0 0 +chi zh Chinese 1 1 +chk Chuukese 0 0 +chm Mari 0 0 +chn Chinook jargon 0 0 +cho Choctaw 0 0 +chp Chipewyan 0 0 +chr Cherokee 0 0 +chu cu Church Slavic 0 0 +chv cv Chuvash 0 0 +chy Cheyenne 0 0 +cmc Chamic languages 0 0 +cop Coptic 0 0 +cor kw Cornish 0 0 +cos co Corsican 0 0 +cpe Creoles and pidgins, English based (Other) 0 0 +cpf Creoles and pidgins, French-based (Other) 0 0 +cpp Creoles and pidgins, 
Portuguese-based (Other) 0 0 +cre cr Cree 0 0 +crh Crimean Tatar 0 0 +crp Creoles and pidgins (Other) 0 0 +csb Kashubian 0 0 +cus Cushitic (Other)' couchitiques, autres langues 0 0 +cze cs Czech 1 1 +dak Dakota 0 0 +dan da Danish 1 1 +dar Dargwa 0 0 +day Dayak 0 0 +del Delaware 0 0 +den Slave (Athapascan) 0 0 +dgr Dogrib 0 0 +din Dinka 0 0 +div dv Divehi 0 0 +doi Dogri 0 0 +dra Dravidian (Other) 0 0 +dua Duala 0 0 +dum Dutch, Middle (ca.1050-1350) 0 0 +dut nl Dutch 1 1 +dyu Dyula 0 0 +dzo dz Dzongkha 0 0 +efi Efik 0 0 +egy Egyptian (Ancient) 0 0 +eka Ekajuk 0 0 +elx Elamite 0 0 +eng en English 1 1 +enm English, Middle (1100-1500) 0 0 +epo eo Esperanto 1 0 +est et Estonian 1 1 +ewe ee Ewe 0 0 +ewo Ewondo 0 0 +fan Fang 0 0 +fao fo Faroese 0 0 +fat Fanti 0 0 +fij fj Fijian 0 0 +fil Filipino 0 0 +fin fi Finnish 1 1 +fiu Finno-Ugrian (Other) 0 0 +fon Fon 0 0 +fre fr French 1 1 +frm French, Middle (ca.1400-1600) 0 0 +fro French, Old (842-ca.1400) 0 0 +fry fy Frisian 0 0 +ful ff Fulah 0 0 +fur Friulian 0 0 +gaa Ga 0 0 +gay Gayo 0 0 +gba Gbaya 0 0 +gem Germanic (Other) 0 0 +geo ka Georgian 1 1 +ger de German 1 1 +gez Geez 0 0 +gil Gilbertese 0 0 +gla gd Gaelic 0 0 +gle ga Irish 0 0 +glg gl Galician 1 1 +glv gv Manx 0 0 +gmh German, Middle High (ca.1050-1500) 0 0 +goh German, Old High (ca.750-1050) 0 0 +gon Gondi 0 0 +gor Gorontalo 0 0 +got Gothic 0 0 +grb Grebo 0 0 +grc Greek, Ancient (to 1453) 0 0 +ell el Greek 1 1 +grn gn Guarani 0 0 +guj gu Gujarati 0 0 +gwi Gwich´in 0 0 +hai Haida 0 0 +hat ht Haitian 0 0 +hau ha Hausa 0 0 +haw Hawaiian 0 0 +heb he Hebrew 1 1 +her hz Herero 0 0 +hil Hiligaynon 0 0 +him Himachali 0 0 +hin hi Hindi 1 1 +hit Hittite 0 0 +hmn Hmong 0 0 +hmo ho Hiri Motu 0 0 +hrv hr Croatian 1 1 +hun hu Hungarian 1 1 +hup Hupa 0 0 +iba Iban 0 0 +ibo ig Igbo 0 0 +ice is Icelandic 1 1 +ido io Ido 0 0 +iii ii Sichuan Yi 0 0 +ijo Ijo 0 0 +iku iu Inuktitut 0 0 +ile ie Interlingue 0 0 +ilo Iloko 0 0 +ina ia Interlingua (International Auxiliary Language Asso 0 0 
+inc Indic (Other) 0 0 +ind id Indonesian 1 1 +ine Indo-European (Other) 0 0 +inh Ingush 0 0 +ipk ik Inupiaq 0 0 +ira Iranian (Other) 0 0 +iro Iroquoian languages 0 0 +ita it Italian 1 1 +jav jv Javanese 0 0 +jpn ja Japanese 1 1 +jpr Judeo-Persian 0 0 +jrb Judeo-Arabic 0 0 +kaa Kara-Kalpak 0 0 +kab Kabyle 0 0 +kac Kachin 0 0 +kal kl Kalaallisut 0 0 +kam Kamba 0 0 +kan kn Kannada 0 0 +kar Karen 0 0 +kas ks Kashmiri 0 0 +kau kr Kanuri 0 0 +kaw Kawi 0 0 +kaz kk Kazakh 1 0 +kbd Kabardian 0 0 +kha Khasi 0 0 +khi Khoisan (Other) 0 0 +khm km Khmer 1 1 +kho Khotanese 0 0 +kik ki Kikuyu 0 0 +kin rw Kinyarwanda 0 0 +kir ky Kirghiz 0 0 +kmb Kimbundu 0 0 +kok Konkani 0 0 +kom kv Komi 0 0 +kon kg Kongo 0 0 +kor ko Korean 1 1 +kos Kosraean 0 0 +kpe Kpelle 0 0 +krc Karachay-Balkar 0 0 +kro Kru 0 0 +kru Kurukh 0 0 +kua kj Kuanyama 0 0 +kum Kumyk 0 0 +kur ku Kurdish 0 0 +kut Kutenai 0 0 +lad Ladino 0 0 +lah Lahnda 0 0 +lam Lamba 0 0 +lao lo Lao 0 0 +lat la Latin 0 0 +lav lv Latvian 1 0 +lez Lezghian 0 0 +lim li Limburgan 0 0 +lin ln Lingala 0 0 +lit lt Lithuanian 1 0 +lol Mongo 0 0 +loz Lozi 0 0 +ltz lb Luxembourgish 1 0 +lua Luba-Lulua 0 0 +lub lu Luba-Katanga 0 0 +lug lg Ganda 0 0 +lui Luiseno 0 0 +lun Lunda 0 0 +luo Luo (Kenya and Tanzania) 0 0 +lus lushai 0 0 +mac mk Macedonian 1 1 +mad Madurese 0 0 +mag Magahi 0 0 +mah mh Marshallese 0 0 +mai Maithili 0 0 +mak Makasar 0 0 +mal ml Malayalam 1 0 +man Mandingo 0 0 +mao mi Maori 0 0 +map Austronesian (Other) 0 0 +mar mr Marathi 0 0 +mas Masai 0 0 +may ms Malay 1 1 +mdf Moksha 0 0 +mdr Mandar 0 0 +men Mende 0 0 +mga Irish, Middle (900-1200) 0 0 +mic Mi'kmaq 0 0 +min Minangkabau 0 0 +mis Miscellaneous languages 0 0 +mkh Mon-Khmer (Other) 0 0 +mlg mg Malagasy 0 0 +mlt mt Maltese 0 0 +mnc Manchu 0 0 +mni Manipuri 0 0 +mno Manobo languages 0 0 +moh Mohawk 0 0 +mol mo Moldavian 0 0 +mon mn Mongolian 1 0 +mos Mossi 0 0 +mwl Mirandese 0 0 +mul Multiple languages 0 0 +mun Munda languages 0 0 +mus Creek 0 0 +mwr Marwari 0 0 +myn Mayan 
languages 0 0 +myv Erzya 0 0 +nah Nahuatl 0 0 +nai North American Indian 0 0 +nap Neapolitan 0 0 +nau na Nauru 0 0 +nav nv Navajo 0 0 +nbl nr Ndebele, South 0 0 +nde nd Ndebele, North 0 0 +ndo ng Ndonga 0 0 +nds Low German 0 0 +nep ne Nepali 0 0 +new Nepal Bhasa 0 0 +nia Nias 0 0 +nic Niger-Kordofanian (Other) 0 0 +niu Niuean 0 0 +nno nn Norwegian Nynorsk 0 0 +nob nb Norwegian Bokmal 0 0 +nog Nogai 0 0 +non Norse, Old 0 0 +nor no Norwegian 1 1 +nso Northern Sotho 0 0 +nub Nubian languages 0 0 +nwc Classical Newari 0 0 +nya ny Chichewa 0 0 +nym Nyamwezi 0 0 +nyn Nyankole 0 0 +nyo Nyoro 0 0 +nzi Nzima 0 0 +oci oc Occitan 1 1 +oji oj Ojibwa 0 0 +ori or Oriya 0 0 +orm om Oromo 0 0 +osa Osage 0 0 +oss os Ossetian 0 0 +ota Turkish, Ottoman (1500-1928) 0 0 +oto Otomian languages 0 0 +paa Papuan (Other) 0 0 +pag Pangasinan 0 0 +pal Pahlavi 0 0 +pam Pampanga 0 0 +pan pa Panjabi 0 0 +pap Papiamento 0 0 +pau Palauan 0 0 +peo Persian, Old (ca.600-400 B.C.) 0 0 +per fa Persian 1 1 +phi Philippine (Other) 0 0 +phn Phoenician 0 0 +pli pi Pali 0 0 +pol pl Polish 1 1 +pon Pohnpeian 0 0 +por pt Portuguese 1 1 +pra Prakrit languages 0 0 +pro Provençal, Old (to 1500) 0 0 +pus ps Pushto 0 0 +que qu Quechua 0 0 +raj Rajasthani 0 0 +rap Rapanui 0 0 +rar Rarotongan 0 0 +roa Romance (Other) 0 0 +roh rm Raeto-Romance 0 0 +rom Romany 0 0 +run rn Rundi 0 0 +rup Aromanian 0 0 +rus ru Russian 1 1 +sad Sandawe 0 0 +sag sg Sango 0 0 +sah Yakut 0 0 +sai South American Indian (Other) 0 0 +sal Salishan languages 0 0 +sam Samaritan Aramaic 0 0 +san sa Sanskrit 0 0 +sas Sasak 0 0 +sat Santali 0 0 +scc sr Serbian 1 1 +scn Sicilian 0 0 +sco Scots 0 0 +sel Selkup 0 0 +sem Semitic (Other) 0 0 +sga Irish, Old (to 900) 0 0 +sgn Sign Languages 0 0 +shn Shan 0 0 +sid Sidamo 0 0 +sin si Sinhalese 1 1 +sio Siouan languages 0 0 +sit Sino-Tibetan (Other) 0 0 +sla Slavic (Other) 0 0 +slo sk Slovak 1 1 +slv sl Slovenian 1 1 +sma Southern Sami 0 0 +sme se Northern Sami 0 0 +smi Sami languages (Other) 0 0 +smj Lule 
Sami 0 0 +smn Inari Sami 0 0 +smo sm Samoan 0 0 +sms Skolt Sami 0 0 +sna sn Shona 0 0 +snd sd Sindhi 0 0 +snk Soninke 0 0 +sog Sogdian 0 0 +som so Somali 0 0 +son Songhai 0 0 +sot st Sotho, Southern 0 0 +spa es Spanish 1 1 +srd sc Sardinian 0 0 +srr Serer 0 0 +ssa Nilo-Saharan (Other) 0 0 +ssw ss Swati 0 0 +suk Sukuma 0 0 +sun su Sundanese 0 0 +sus Susu 0 0 +sux Sumerian 0 0 +swa sw Swahili 1 0 +swe sv Swedish 1 1 +syr Syriac 1 0 +tah ty Tahitian 0 0 +tai Tai (Other) 0 0 +tam ta Tamil 1 0 +tat tt Tatar 0 0 +tel te Telugu 1 0 +tem Timne 0 0 +ter Tereno 0 0 +tet Tetum 0 0 +tgk tg Tajik 0 0 +tgl tl Tagalog 1 1 +tha th Thai 1 1 +tib bo Tibetan 0 0 +tig Tigre 0 0 +tir ti Tigrinya 0 0 +tiv Tiv 0 0 +tkl Tokelau 0 0 +tlh Klingon 0 0 +tli Tlingit 0 0 +tmh Tamashek 0 0 +tog Tonga (Nyasa) 0 0 +ton to Tonga (Tonga Islands) 0 0 +tpi Tok Pisin 0 0 +tsi Tsimshian 0 0 +tsn tn Tswana 0 0 +tso ts Tsonga 0 0 +tuk tk Turkmen 0 0 +tum Tumbuka 0 0 +tup Tupi languages 0 0 +tur tr Turkish 1 1 +tut Altaic (Other) 0 0 +tvl Tuvalu 0 0 +twi tw Twi 0 0 +tyv Tuvinian 0 0 +udm Udmurt 0 0 +uga Ugaritic 0 0 +uig ug Uighur 0 0 +ukr uk Ukrainian 1 1 +umb Umbundu 0 0 +und Undetermined 0 0 +urd ur Urdu 1 0 +uzb uz Uzbek 0 0 +vai Vai 0 0 +ven ve Venda 0 0 +vie vi Vietnamese 1 1 +vol vo Volapük 0 0 +vot Votic 0 0 +wak Wakashan languages 0 0 +wal Walamo 0 0 +war Waray 0 0 +was Washo 0 0 +wel cy Welsh 0 0 +wen Sorbian languages 0 0 +wln wa Walloon 0 0 +wol wo Wolof 0 0 +xal Kalmyk 0 0 +xho xh Xhosa 0 0 +yao Yao 0 0 +yap Yapese 0 0 +yid yi Yiddish 0 0 +yor yo Yoruba 0 0 +ypk Yupik languages 0 0 +zap Zapotec 0 0 +zen Zenaga 0 0 +zha za Zhuang 0 0 +znd Zande 0 0 +zul zu Zulu 0 0 +zun Zuni 0 0 +rum ro Romanian 1 1 +pob pb Brazilian 1 1 +mne Montenegrin 1 0 diff --git a/lib/babelfish/exceptions.py b/lib/babelfish/exceptions.py new file mode 100755 index 00000000..bbc6efe3 --- /dev/null +++ b/lib/babelfish/exceptions.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. 
All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals + + +class Error(Exception): + """Base class for all exceptions in babelfish""" + pass + + +class LanguageError(Error, AttributeError): + """Base class for all language exceptions in babelfish""" + pass + + +class LanguageConvertError(LanguageError): + """Exception raised by converters when :meth:`~babelfish.converters.LanguageConverter.convert` fails + + :param string alpha3: alpha3 code that failed conversion + :param country: country code that failed conversion, if any + :type country: string or None + :param script: script code that failed conversion, if any + :type script: string or None + + """ + def __init__(self, alpha3, country=None, script=None): + self.alpha3 = alpha3 + self.country = country + self.script = script + + def __str__(self): + s = self.alpha3 + if self.country is not None: + s += '-' + self.country + if self.script is not None: + s += '-' + self.script + return s + + +class LanguageReverseError(LanguageError): + """Exception raised by converters when :meth:`~babelfish.converters.LanguageReverseConverter.reverse` fails + + :param string code: code that failed reverse conversion + + """ + def __init__(self, code): + self.code = code + + def __str__(self): + return repr(self.code) + + +class CountryError(Error, AttributeError): + """Base class for all country exceptions in babelfish""" + pass + + +class CountryConvertError(CountryError): + """Exception raised by converters when :meth:`~babelfish.converters.CountryConverter.convert` fails + + :param string alpha2: alpha2 code that failed conversion + + """ + def __init__(self, alpha2): + self.alpha2 = alpha2 + + def __str__(self): + return self.alpha2 + + +class CountryReverseError(CountryError): + """Exception raised by converters when :meth:`~babelfish.converters.CountryReverseConverter.reverse` fails + + :param string 
code: code that failed reverse conversion + + """ + def __init__(self, code): + self.code = code + + def __str__(self): + return repr(self.code) diff --git a/lib/babelfish/language.py b/lib/babelfish/language.py new file mode 100755 index 00000000..9b5e7825 --- /dev/null +++ b/lib/babelfish/language.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals +from collections import namedtuple +from functools import partial +import os +# from pkg_resources import resource_stream # @UnresolvedImport +from .converters import ConverterManager +from .country import Country +from .exceptions import LanguageConvertError +from .script import Script +from . import basestr + + +LANGUAGES = set() +LANGUAGE_MATRIX = [] + +#: The namedtuple used in the :data:`LANGUAGE_MATRIX` +IsoLanguage = namedtuple('IsoLanguage', ['alpha3', 'alpha3b', 'alpha3t', 'alpha2', 'scope', 'type', 'name', 'comment']) + +f = open(os.path.join(os.path.dirname(__file__), 'data/iso-639-3.tab')) +f.readline() +for l in f: + iso_language = IsoLanguage(*l.decode('utf-8').split('\t')) + LANGUAGES.add(iso_language.alpha3) + LANGUAGE_MATRIX.append(iso_language) +f.close() + + +class LanguageConverterManager(ConverterManager): + """:class:`~babelfish.converters.ConverterManager` for language converters""" + entry_point = 'babelfish.language_converters' + internal_converters = ['alpha2 = babelfish.converters.alpha2:Alpha2Converter', + 'alpha3b = babelfish.converters.alpha3b:Alpha3BConverter', + 'alpha3t = babelfish.converters.alpha3t:Alpha3TConverter', + 'name = babelfish.converters.name:NameConverter', + 'scope = babelfish.converters.scope:ScopeConverter', + 'type = babelfish.converters.type:LanguageTypeConverter', + 'opensubtitles = babelfish.converters.opensubtitles:OpenSubtitlesConverter'] + 
+language_converters = LanguageConverterManager() + + +class LanguageMeta(type): + """The :class:`Language` metaclass + + Dynamically redirect :meth:`Language.frommycode` to :meth:`Language.fromcode` with the ``mycode`` `converter` + + """ + def __getattr__(cls, name): + if name.startswith('from'): + return partial(cls.fromcode, converter=name[4:]) + return type.__getattribute__(cls, name) + + +class Language(LanguageMeta(str('LanguageBase'), (object,), {})): + """A human language + + A human language is composed of a language part following the ISO-639 + standard and can be country-specific when a :class:`~babelfish.country.Country` + is specified. + + The :class:`Language` is extensible with custom converters (see :ref:`custom_converters`) + + :param string language: the language as a 3-letter ISO-639-3 code + :param country: the country (if any) as a 2-letter ISO-3166 code or :class:`~babelfish.country.Country` instance + :type country: string or :class:`~babelfish.country.Country` or None + :param script: the script (if any) as a 4-letter ISO-15924 code or :class:`~babelfish.script.Script` instance + :type script: string or :class:`~babelfish.script.Script` or None + :param unknown: the unknown language as a three-letters ISO-639-3 code to use as fallback + :type unknown: string or None + :raise: ValueError if the language could not be recognized and `unknown` is ``None`` + + """ + def __init__(self, language, country=None, script=None, unknown=None): + if unknown is not None and language not in LANGUAGES: + language = unknown + if language not in LANGUAGES: + raise ValueError('%r is not a valid language' % language) + self.alpha3 = language + self.country = None + if isinstance(country, Country): + self.country = country + elif country is None: + self.country = None + else: + self.country = Country(country) + self.script = None + if isinstance(script, Script): + self.script = script + elif script is None: + self.script = None + else: + self.script = 
Script(script) + + @classmethod + def fromcode(cls, code, converter): + """Create a :class:`Language` by its `code` using `converter` to + :meth:`~babelfish.converters.LanguageReverseConverter.reverse` it + + :param string code: the code to reverse + :param string converter: name of the :class:`~babelfish.converters.LanguageReverseConverter` to use + :return: the corresponding :class:`Language` instance + :rtype: :class:`Language` + + """ + return cls(*language_converters[converter].reverse(code)) + + @classmethod + def fromietf(cls, ietf): + """Create a :class:`Language` by from an IETF language code + + :param string ietf: the ietf code + :return: the corresponding :class:`Language` instance + :rtype: :class:`Language` + + """ + subtags = ietf.split('-') + language_subtag = subtags.pop(0).lower() + if len(language_subtag) == 2: + language = cls.fromalpha2(language_subtag) + else: + language = cls(language_subtag) + while subtags: + subtag = subtags.pop(0) + if len(subtag) == 2: + language.country = Country(subtag.upper()) + else: + language.script = Script(subtag.capitalize()) + if language.script is not None: + if subtags: + raise ValueError('Wrong IETF format. 
Unmatched subtags: %r' % subtags) + break + return language + + def __getstate__(self): + return self.alpha3, self.country, self.script + + def __setstate__(self, state): + self.alpha3, self.country, self.script = state + + def __getattr__(self, name): + alpha3 = self.alpha3 + country = self.country.alpha2 if self.country is not None else None + script = self.script.code if self.script is not None else None + try: + return language_converters[name].convert(alpha3, country, script) + except KeyError: + raise AttributeError(name) + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other): + if isinstance(other, basestr): + return str(self) == other + if not isinstance(other, Language): + return False + return (self.alpha3 == other.alpha3 and + self.country == other.country and + self.script == other.script) + + def __ne__(self, other): + return not self == other + + def __bool__(self): + return self.alpha3 != 'und' + __nonzero__ = __bool__ + + def __repr__(self): + return '<Language [%s]>' % self + + def __str__(self): + try: + s = self.alpha2 + except LanguageConvertError: + s = self.alpha3 + if self.country is not None: + s += '-' + str(self.country) + if self.script is not None: + s += '-' + str(self.script) + return s diff --git a/lib/babelfish/script.py b/lib/babelfish/script.py new file mode 100755 index 00000000..5b3970a2 --- /dev/null +++ b/lib/babelfish/script.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2013 the BabelFish authors. All rights reserved. +# Use of this source code is governed by the 3-clause BSD license +# that can be found in the LICENSE file. +# +from __future__ import unicode_literals + +import os +from collections import namedtuple +# from pkg_resources import resource_stream # @UnresolvedImport +from . 
import basestr + +#: Script code to script name mapping +SCRIPTS = {} + +#: List of countries in the ISO-15924 as namedtuple of code, number, name, french_name, pva and date +SCRIPT_MATRIX = [] + +#: The namedtuple used in the :data:`SCRIPT_MATRIX` +IsoScript = namedtuple('IsoScript', ['code', 'number', 'name', 'french_name', 'pva', 'date']) + +f = open(os.path.join(os.path.dirname(__file__), 'data/iso15924-utf8-20131012.txt')) +f.readline() +for l in f: + l = l.decode('utf-8').strip() + if not l or l.startswith('#'): + continue + script = IsoScript._make(l.split(';')) + SCRIPT_MATRIX.append(script) + SCRIPTS[script.code] = script.name +f.close() + + +class Script(object): + """A human writing system + + A script is represented by a 4-letter code from the ISO-15924 standard + + :param string script: 4-letter ISO-15924 script code + + """ + def __init__(self, script): + if script not in SCRIPTS: + raise ValueError('%r is not a valid script' % script) + + #: ISO-15924 4-letter script code + self.code = script + + @property + def name(self): + """English name of the script""" + return SCRIPTS[self.code] + + def __getstate__(self): + return self.code + + def __setstate__(self, state): + self.code = state + + def __hash__(self): + return hash(self.code) + + def __eq__(self, other): + if isinstance(other, basestr): + return self.code == other + if not isinstance(other, Script): + return False + return self.code == other.code + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return '<Script [%s]>' % self + + def __str__(self): + return self.code diff --git a/lib/btserver/__init__.py b/lib/btserver/__init__.py deleted file mode 100644 index d8828c0f..00000000 --- a/lib/btserver/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- - -from client import Client - -__all__ = ["Client"] diff --git a/lib/btserver/cache.py b/lib/btserver/cache.py deleted file mode 100644 index 81605bdb..00000000 --- a/lib/btserver/cache.py +++ 
/dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# Gestiona el cache del servidor torrent: -# Guarda los .torrent generado -# Guarda los .resume de cada torrent -# ------------------------------------------------------------ -import base64 -import os.path -import re -import traceback - -try: - import xbmc, xbmcgui -except: - pass - -from platformcode import config -LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default='') - -from servers import torrent as torr -lt, e, e1, e2 = torr.import_libtorrent(LIBTORRENT_PATH) - - -class Cache(object): - CACHE_DIR = '.cache' - - def __init__(self, path): - - if not os.path.isdir(path): - os.makedirs(path) - self.path = os.path.join(path, Cache.CACHE_DIR) - if not os.path.isdir(self.path): - os.makedirs(self.path) - - def _tname(self, info_hash): - return os.path.join(self.path, info_hash.upper() + '.torrent') - - def _rname(self, info_hash): - return os.path.join(self.path, info_hash.upper() + '.resume') - - def save_resume(self, info_hash, data): - f = open(self._rname(info_hash), 'wb') - f.write(data) - f.close() - - def get_resume(self, url=None, info_hash=None): - if url: - info_hash = self._index.get(url) - if not info_hash: - return - rname = self._rname(info_hash) - if os.access(rname, os.R_OK): - f = open(rname, 'rb') - v = f.read() - f.close() - return v - - def file_complete(self, torrent): - info_hash = str(torrent.info_hash()) - nt = lt.create_torrent(torrent) - tname = self._tname(info_hash) - f = open(tname, 'wb') - f.write(lt.bencode(nt.generate())) - f.close() - - def get_torrent(self, url=None, info_hash=None): - if url: - info_hash = self._index.get(url) - if not info_hash: - return - tname = self._tname(info_hash) - if os.access(tname, os.R_OK): - return tname - - magnet_re = re.compile('xt=urn:btih:([0-9A-Za-z]+)') - hexa_chars = re.compile('^[0-9A-F]+$') - - @staticmethod - def hash_from_magnet(m): - res = 
Cache.magnet_re.search(m) - if res: - ih = res.group(1).upper() - if len(ih) == 40 and Cache.hexa_chars.match(ih): - return res.group(1).upper() - elif len(ih) == 32: - s = base64.b32decode(ih) - return "".join("{:02X}".format(ord(c)) for c in s) - else: - raise ValueError('Not BT magnet link') - - else: - raise ValueError('Not BT magnet link') diff --git a/lib/btserver/client.py b/lib/btserver/client.py deleted file mode 100644 index 3233dfaf..00000000 --- a/lib/btserver/client.py +++ /dev/null @@ -1,669 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import pickle -import random -import time -import urllib - -try: - import xbmc, xbmcgui -except: - pass - -from platformcode import config, logger -LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default='') - -from servers import torrent as torr -lt, e, e1, e2 = torr.import_libtorrent(LIBTORRENT_PATH) - -from cache import Cache -from dispatcher import Dispatcher -from file import File -from handler import Handler -from monitor import Monitor -from resume_data import ResumeData -from server import Server - -try: - BUFFER = int(config.get_setting("bt_buffer", server="torrent", default="50")) -except: - BUFFER = 50 - config.set_setting("bt_buffer", "50", server="torrent") -DOWNLOAD_PATH = config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")) -BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) -RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) -msg_header = 'Client Torrent BT' - - -class Client(object): - INITIAL_TRACKERS = ['udp://tracker.openbittorrent.com:80', - 'udp://tracker.istole.it:80', - 'udp://open.demonii.com:80', - 'udp://tracker.coppersurfer.tk:80', - 'udp://tracker.leechers-paradise.org:6969', - 'udp://exodus.desync.com:6969', - 'udp://tracker.publicbt.com:80', - 'http://tracker.torrentbay.to:6969/announce', - 'http://tracker.pow7.com/announce', - 
'udp://tracker.ccc.de:80/announce', - 'udp://open.demonii.com:1337', - 'http://9.rarbg.com:2710/announce', - 'http://bt.careland.com.cn:6969/announce', - 'http://explodie.org:6969/announce', - 'http://mgtracker.org:2710/announce', - 'http://tracker.best-torrents.net:6969/announce', - 'http://tracker.tfile.me/announce', - 'http://tracker1.wasabii.com.tw:6969/announce', - 'udp://9.rarbg.com:2710/announce', - 'udp://9.rarbg.me:2710/announce', - 'udp://coppersurfer.tk:6969/announce', - 'http://www.spanishtracker.com:2710/announce', - 'http://www.todotorrents.com:2710/announce' - ] ### Added some trackers from MCT - - VIDEO_EXTS = {'.avi': 'video/x-msvideo', '.mp4': 'video/mp4', '.mkv': 'video/x-matroska', - '.m4v': 'video/mp4', '.mov': 'video/quicktime', '.mpg': 'video/mpeg', '.ogv': 'video/ogg', - '.ogg': 'video/ogg', '.webm': 'video/webm', '.ts': 'video/mp2t', '.3gp': 'video/3gpp', - '.rar': 'video/unrar'} - - def __init__(self, url=None, port=None, ip=None, auto_shutdown=True, wait_time=20, timeout=5, auto_delete=True, - temp_path=None, is_playing_fnc=None, print_status=False): - - # server - if port: - self.port = port - else: - self.port = random.randint(8000, 8099) - if ip: - self.ip = ip - else: - self.ip = "127.0.0.1" - self.server = Server((self.ip, self.port), Handler, client=self) - - # Options - if temp_path: - self.temp_path = temp_path - else: - self.temp_path = DOWNLOAD_PATH - self.is_playing_fnc = is_playing_fnc - self.timeout = timeout - self.auto_delete = auto_delete - self.wait_time = wait_time - self.auto_shutdown = auto_shutdown - self.buffer_size = BUFFER - self.first_pieces_priorize = BUFFER - self.last_pieces_priorize = 5 - self.state_file = "state" - try: - self.torrent_paramss = {'save_path': self.temp_path, 'storage_mode': lt.storage_mode_t.storage_mode_allocate} - except Exception as e: - try: - do = xbmcgui.Dialog() - e = e1 or e2 - do.ok(config.get_localized_string(30035) + 'BT Libtorrent', config.get_localized_string(30036), 
config.get_localized_string(60015), str(e)) - except: - pass - return - - # State - self.has_meta = False - self.meta = None - self.start_time = None - self.last_connect = 0 - self.connected = False - self.closed = False - self.file = None - self.files = None - self._th = None - self.seleccion = 0 - self.index = 0 - - # Sesion - self._cache = Cache(self.temp_path) - self._ses = lt.session() - #self._ses.listen_on(0, 0) ### ALFA: it blocks repro of some .torrents - # Cargamos el archivo de estado (si existe) - """ ### ALFA: it blocks repro of some .torrents - if os.path.exists(os.path.join(self.temp_path, self.state_file)): - try: - f = open(os.path.join(self.temp_path, self.state_file), "rb") - state = pickle.load(f) - self._ses.load_state(state) - f.close() - except: - pass - """ - - self._start_services() - - # Monitor & Dispatcher - self._monitor = Monitor(self) - if print_status: - self._monitor.add_listener(self.print_status) - self._monitor.add_listener(self._check_meta) - self._monitor.add_listener(self.save_state) - self._monitor.add_listener(self.priorize_start_file) - self._monitor.add_listener(self.announce_torrent) - - if self.auto_shutdown: - self._monitor.add_listener(self._auto_shutdown) - - self._dispatcher = Dispatcher(self) - self._dispatcher.add_listener(self._update_ready_pieces) - - # Iniciamos la URL - if url: - self.start_url(url) - - def set_speed_limits(self, download=0, upload=0): - """ - Función encargada de poner límites a la velocidad de descarga o subida - """ - if isinstance(download, int) and download > 0: - self._th.set_download_limit(download * 1024) - if isinstance(upload, int) and download > 0: - self._th.set_upload_limit(upload * 1024) - - def get_play_list(self): - """ - Función encargada de generar el playlist - """ - # Esperamos a lo metadatos - while not self.has_meta: - time.sleep(1) - - # Comprobamos que haya archivos de video - if self.files: - if len(self.files) > 1: - return "http://" + self.ip + ":" + str(self.port) + 
"/playlist.pls" - else: - return "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(self.files[0].path) - - def get_files(self): - """ - Función encargada de genera el listado de archivos - """ - # Esperamos a lo metadatos - while not self.has_meta: - time.sleep(1) - files = [] - - # Comprobamos que haya archivos de video - if self.files: - # Creamos el dict con los archivos - for file in self.files: - n = file.path - u = "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(n) - s = file.size - files.append({"name": n, "url": u, "size": s}) - - return files - - def _find_files(self, files, search=None): - """ - Función encargada de buscar los archivos reproducibles del torrent - """ - self.total_size = 0 - # Obtenemos los archivos que la extension este en la lista - videos = filter(lambda f: self.VIDEO_EXTS.has_key(os.path.splitext(f.path)[1]), files) - - if not videos: - raise Exception('No video files in torrent') - for v in videos: - self.total_size += v.size ### ALFA - videos[videos.index(v)].index = files.index(v) - return videos - - def set_file(self, f): - """ - Función encargada de seleccionar el archivo que vamos a servir y por tanto, priorizar su descarga - """ - # Seleccionamos el archivo que vamos a servir - fmap = self.meta.map_file(f.index, 0, 1) - self.file = File(f.path, self.temp_path, f.index, f.size, fmap, self.meta.piece_length(), self) - if self.seleccion < 0: ### ALFA - self.file.first_piece = 0 ### ALFA - self.file.last_piece = self.meta.num_pieces() ### ALFA - self.file.size = self.total_size ### ALFA - self.prioritize_file() - - def prioritize_piece(self, pc, idx): - """ - Función encargada de priorizar una determinada pieza - """ - piece_duration = 1000 - min_deadline = 2000 - dl = idx * piece_duration + min_deadline - """ ### ALFA - try: - self._th.set_piece_deadline(pc, dl, lt.deadline_flags.alert_when_available) - except: - pass - """ - - if idx == 0: - tail_pieces = 9 - # Piezas anteriores a la primera se 
desactivan - if (self.file.last_piece - pc) > tail_pieces: - for i in xrange(self.file.first_piece, pc): - self._th.piece_priority(i, 0) - self._th.reset_piece_deadline(i) - - # Piezas siguientes a la primera se activan - for i in xrange(pc + 1, self.file.last_piece + 1): - #self._th.piece_priority(i, 0) - self._th.piece_priority(i, 1) - - def prioritize_file(self): - """ - Función encargada de priorizar las piezas correspondientes al archivo seleccionado en la funcion set_file() - """ - priorities = [] - for i in xrange(self.meta.num_pieces()): - if i >= self.file.first_piece and i <= self.file.last_piece: - priorities.append(1) - else: - if self.index < 0: - priorities.append(1) ### ALFA - else: - priorities.append(0) ### ALFA - - self._th.prioritize_pieces(priorities) - - x = 0 - for i, _set in enumerate(self._th.piece_priorities()): - if _set > 0: x += 1 - #logger.info("***** Nº Pieza: %s: %s" % (i, str(_set))) - logger.info("***** Piezas %s : Activas: %s" % (str(i+1), str(x))) - logger.info("***** first_piece %s : last_piece: %s" % (str(self.file.first_piece), str(self.file.last_piece))) - - def download_torrent(self, url): - """ - Función encargada de descargar un archivo .torrent - """ - from core import httptools - - data = httptools.downloadpage(url).data - return data - - def start_url(self, uri): - """ - Función encargada iniciar la descarga del torrent desde la url, permite: - - Url apuntando a un .torrent - - Url magnet - - Archivo .torrent local - """ - - if self._th: - raise Exception('Torrent is already started') - - if uri.startswith('http://') or uri.startswith('https://'): - torrent_data = self.download_torrent(uri) - info = lt.torrent_info(lt.bdecode(torrent_data)) - tp = {'ti': info} - resume_data = self._cache.get_resume(info_hash=str(info.info_hash())) - if resume_data: - tp['resume_data'] = resume_data - - elif uri.startswith('magnet:'): - tp = {'url': uri} - resume_data = self._cache.get_resume(info_hash=Cache.hash_from_magnet(uri)) - if 
resume_data: - tp['resume_data'] = resume_data - - elif os.path.isfile(uri): - if os.access(uri, os.R_OK): - info = lt.torrent_info(uri) - tp = {'ti': info} - resume_data = self._cache.get_resume(info_hash=str(info.info_hash())) - if resume_data: - tp['resume_data'] = resume_data - else: - raise ValueError('Invalid torrent path %s' % uri) - else: - raise ValueError("Invalid torrent %s" % uri) - - tp.update(self.torrent_paramss) - self._th = self._ses.add_torrent(tp) - - for tr in self.INITIAL_TRACKERS: - self._th.add_tracker({'url': tr}) - - self._th.set_sequential_download(True) - self._th.force_reannounce() - self._th.force_dht_announce() - - self._monitor.start() - self._dispatcher.do_start(self._th, self._ses) - self.server.run() - - def stop(self): - """ - Función encargada de de detener el torrent y salir - """ - self._dispatcher.stop() - self._dispatcher.join() - self._monitor.stop() - self.server.stop() - self._dispatcher.stop() - if self._ses: - self._ses.pause() - if self._th: - self.save_resume() - self.save_state() - self._stop_services() - self._ses.remove_torrent(self._th, self.auto_delete) - del self._ses - self.closed = True - - def pause(self): - """ - Función encargada de de pausar el torrent - """ - self._ses.pause() - - def _start_services(self): - """ - Función encargada de iniciar los servicios de libtorrent: dht, lsd, upnp, natpnp - """ - self._ses.add_dht_router("router.bittorrent.com", 6881) - self._ses.add_dht_router("router.bitcomet.com", 554) - self._ses.add_dht_router("router.utorrent.com", 6881) - self._ses.add_dht_router("dht.transmissionbt.com",6881) ### from MCT - self._ses.start_dht() - self._ses.start_lsd() - self._ses.start_upnp() - self._ses.start_natpmp() - - def _stop_services(self): - """ - Función encargada de detener los servicios de libtorrent: dht, lsd, upnp, natpnp - """ - self._ses.stop_natpmp() - self._ses.stop_upnp() - self._ses.stop_lsd() - self._ses.stop_dht() - - def save_resume(self): - """ - Función encargada 
guardar los metadatos para continuar una descarga mas rapidamente - """ - if self._th.need_save_resume_data() and self._th.is_valid() and self.meta: - r = ResumeData(self) - start = time.time() - while (time.time() - start) <= 5: - if r.data or r.failed: - break - time.sleep(0.1) - if r.data: - self._cache.save_resume(self.unique_file_id, lt.bencode(r.data)) - - @property - def status(self): - """ - Función encargada de devolver el estado del torrent - """ - if self._th: - s = self._th.status() - # Download Rate - s._download_rate = s.download_rate / 1024 - - # Progreso del archivo - if self.file: - pieces = s.pieces[self.file.first_piece:self.file.last_piece] ### ALFA - progress = float(sum(pieces)) / len(pieces) - s.pieces_len = len(pieces) ### ALFA - s.pieces_sum = sum(pieces) ### ALFA - #logger.info('***** Estado piezas: %s' % pieces) - else: - progress = 0 - s.pieces_len = 0 ### ALFA - s.pieces_sum = 0 ### ALFA - - s.progress_file = progress * 100 - - # Tamaño del archivo - s.file_name = '' ### ALFA - s.seleccion = '' ### ALFA - - if self.file: - s.seleccion = self.seleccion ### ALFA - s.file_name = self.file.path ### ALFA - s.file_size = self.file.size / 1048576.0 - else: - s.file_size = 0 - - # Estado del buffer - if self.file and self.file.cursor: # Con una conexion activa: Disponible vs Posicion del reproductor - percent = len(self.file.cursor.cache) - percent = percent * 100 / self.buffer_size - s.buffer = int(percent) - - elif self.file: # Sin una conexion activa: Pre-buffer antes de iniciar - # El Pre-buffer consta de dos partes_ - # 1. Buffer al inicio del archivo para que el reproductor empieze sin cortes - # 2. 
Buffer al final del archivo (en algunos archivos el reproductor mira el final del archivo antes de comenzar) - bp = [] - - # El tamaño del buffer de inicio es el tamaño del buffer menos el tamaño del buffer del final - first_pieces_priorize = self.buffer_size - self.last_pieces_priorize - - # Comprobamos qué partes del buffer del inicio estan disponibles - for x in range(first_pieces_priorize): - if self._th.have_piece(self.file.first_piece + x): - bp.append(True) - else: - bp.append(False) - - # Comprobamos qué partes del buffer del final estan disponibles - for x in range(self.last_pieces_priorize): - if self._th.have_piece(self.file.last_piece - x): - bp.append(True) - else: - bp.append(False) - - s.buffer = int(sum(bp) * 100 / self.buffer_size) - - else: # Si no hay ningun archivo seleccionado: No hay buffer - s.buffer = 0 - - # Tiempo restante para cerrar en caso de tener el timeout activo - if self.auto_shutdown: - if self.connected: - if self.timeout: - s.timeout = int(self.timeout - (time.time() - self.last_connect - 1)) - if self.file and self.file.cursor: - s.timeout = self.timeout - if s.timeout < 0: s.timeout = "Cerrando" - else: - s.timeout = "---" - else: - if self.start_time and self.wait_time: - s.timeout = int(self.wait_time - (time.time() - self.start_time - 1)) - if s.timeout < 0: s.timeout = "Cerrando" - else: - s.timeout = "---" - - else: - s.timeout = "Off" - - # Estado de la descarga - STATE_STR = ['Queued', 'Checking', 'Downloading Metadata', \ - 'Downloading', 'Finalized', 'Seeding', 'Allocating', 'Checking Fastresume'] - s.str_state = STATE_STR[s.state] - - # Estado DHT - if self._ses.dht_state() is not None: - s.dht_state = "On" - s.dht_nodes = self._ses.status().dht_nodes - else: - s.dht_state = "Off" - s.dht_nodes = 0 - - # Cantidad de Trackers - s.trackers = len(self._th.trackers()) - - # Origen de los peers - s.dht_peers = 0 - s.trk_peers = 0 - s.pex_peers = 0 - s.lsd_peers = 0 - - for peer in self._th.get_peer_info(): - if 
peer.source & 1: - s.trk_peers += 1 - if peer.source & 2: - s.dht_peers += 1 - if peer.source & 4: - s.pex_peers += 1 - if peer.source & 8: - s.lsd_peers += 1 - - return s - - """ - Servicios: - - Estas funciones se ejecutan de forma automatica cada x tiempo en otro Thread. - - Estas funciones son ejecutadas mientras el torrent esta activo algunas pueden desactivarse - segun la configuracion como por ejemplo la escritura en el log - """ - - def _auto_shutdown(self, *args, **kwargs): - """ - Servicio encargado de autoapagar el servidor - """ - if self.file and self.file.cursor: - self.last_connect = time.time() - self.connected = True - - if self.is_playing_fnc and self.is_playing_fnc(): - self.last_connect = time.time() - self.connected = True - - if self.auto_shutdown: - # shudown por haber cerrado el reproductor - if self.connected and self.is_playing_fnc and not self.is_playing_fnc(): - if time.time() - self.last_connect - 1 > self.timeout: - self.stop() - - # shutdown por no realizar ninguna conexion - if (not self.file or not self.file.cursor) and self.start_time and self.wait_time and not self.connected: - if time.time() - self.start_time - 1 > self.wait_time: - self.stop() - - # shutdown tras la ultima conexion - if (not self.file or not self.file.cursor) and self.timeout and self.connected and not self.is_playing_fnc: - if time.time() - self.last_connect - 1 > self.timeout: - self.stop() - - def announce_torrent(self): - """ - Servicio encargado de anunciar el torrent - """ - self._th.force_reannounce() - self._th.force_dht_announce() - - def save_state(self): - """ - Servicio encargado de guardar el estado - """ - state = self._ses.save_state() - f = open(os.path.join(self.temp_path, self.state_file), 'wb') - pickle.dump(state, f) - f.close() - - def _update_ready_pieces(self, alert_type, alert): - """ - Servicio encargado de informar que hay una pieza disponible - """ - if alert_type == 'read_piece_alert' and self.file: - 
self.file.update_piece(alert.piece, alert.buffer) - - def _check_meta(self): - """ - Servicio encargado de comprobar si los metadatos se han descargado - """ - if self.status.state >= 3 and self.status.state <= 5 and not self.has_meta: - - # Guardamos los metadatos - self.meta = self._th.get_torrent_info() - - # Obtenemos la lista de archivos del meta - fs = self.meta.files() - if isinstance(fs, list): - files = fs - else: - files = [fs.at(i) for i in xrange(fs.num_files())] - - # Guardamos la lista de archivos - self.files = self._find_files(files) - - # Si hay varios vídeos (no RAR), se selecciona el vídeo o "todos" - lista = [] - seleccion = 0 - for file in self.files: - if '.rar' in str(file.path): - seleccion = -9 - lista += [os.path.split(str(file.path))[1]] - if len(lista) > 1 and seleccion >= 0: - d = xbmcgui.Dialog() - seleccion = d.select(msg_header + config.get_localized_string(30034), lista) - - if seleccion < 0: - index = 0 - self.index = seleccion - else: - index = seleccion - self.index = self.files[index].index - self.seleccion = seleccion - - # Marcamos el primer archivo como activo - self.set_file(self.files[index]) - - # Damos por iniciada la descarga - self.start_time = time.time() - - # Guardamos el .torrent en el cache - self._cache.file_complete(self._th.get_torrent_info()) - - self.has_meta = True - - def priorize_start_file(self): - ''' - Servicio encargado de priorizar el principio y final de archivo cuando no hay conexion - ''' - if self.file and not self.file.cursor: - num_start_pieces = self.buffer_size - self.last_pieces_priorize # Cantidad de piezas a priorizar al inicio - num_end_pieces = self.last_pieces_priorize # Cantidad de piezas a priorizar al final - - pieces_count = 0 - # Priorizamos las ultimas piezas - for y in range(self.file.last_piece - num_end_pieces, self.file.last_piece + 1): - if not self._th.have_piece(y): - self.prioritize_piece(y, pieces_count) - pieces_count += 1 - - # Priorizamos las primeras piezas - for y in 
range(self.file.first_piece, self.file.last_piece + 1): - if not self._th.have_piece(y): - if pieces_count == self.buffer_size: - break - self.prioritize_piece(y, pieces_count) - pieces_count += 1 - - def print_status(self): - ''' - Servicio encargado de mostrar en el log el estado de la descarga - ''' - s = self.status ### ALFA - if self.seleccion >= 0: - archivo = self.seleccion + 1 - else: - archivo = self.seleccion - - logger.info( - '%.2f%% de %.1fMB %s | %.1f kB/s | #%s %d%% | AutoClose: %s | S: %d(%d) P: %d(%d)) | TRK: %d DHT: %d PEX: %d LSD %d | DHT:%s (%d) | Trakers: %d | Pieces: %d (%d)' % \ - (s.progress_file, s.file_size, s.str_state, s._download_rate, archivo, s.buffer, s.timeout, s.num_seeds, \ - s.num_complete, s.num_peers, s.num_incomplete, s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers, - s.dht_state, s.dht_nodes, s.trackers, s.pieces_sum, s.pieces_len)) ### ALFA diff --git a/lib/btserver/cursor.py b/lib/btserver/cursor.py deleted file mode 100644 index 5ecccfda..00000000 --- a/lib/btserver/cursor.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- - -from threading import Lock, Event - - -class Cursor(object): - def __init__(self, file): - self._file = file - self.pos = 0 - self.timeout = 30 - self.cache_size = self._file._client.buffer_size - self.cache = {} - self.lock = Lock() - self.event = Event() - self.cache_first = 0 - - def fill_cache(self, first): - self.cache_first = first - - with self.lock: - for p in sorted(self.cache): - if p < first: del self.cache[p] - - self.event.clear() - for i in xrange(first, first + self.cache_size): - if i <= self._file.last_piece: - self._file._client.prioritize_piece(i, i - first) - - def has_piece(self, n): - with self.lock: - return n in self.cache - - def _wait_piece(self, pc_no): - while not self.has_piece(pc_no): - self.fill_cache(pc_no) - self.event.wait(self.timeout) - - def _get_piece(self, n): - with self.lock: - if not n in self.cache: - raise ValueError('index of of scope of current 
cache') - return self.cache[n] - - def get_piece(self, n): - self._wait_piece(n) - return self._get_piece(n) - - def close(self): - self._file.cursor = None - - def read(self, size=None): - data = "" - max_size = self._file.size - self.pos - if not size: - size = max_size - else: - size = min(size, max_size) - - if size: - pc_no, ofs = self._file.map_piece(self.pos) - data = self.get_piece(pc_no)[ofs: ofs + size] - - if len(data) < size: - remains = size - len(data) - pc_no += 1 - self.fill_cache(pc_no) - while remains and self.has_piece(pc_no): - sz = min(remains, self._file.piece_size) - data += self.get_piece(pc_no)[:sz] - remains -= sz - if remains: - pc_no += 1 - self.fill_cache(pc_no) - - self.pos += len(data) - - return data - - def seek(self, n): - if n > self._file.size: - n = self._file.size - elif n < 0: - raise ValueError('Seeking negative') - self.pos = n - - def tell(self): - return self.pos - - def update_piece(self, n, data): - with self.lock: - pcs = sorted(self.cache) - if len(pcs) < self.cache_size: - if len(pcs): - new = max(pcs) + 1 - else: - new = self.cache_first - if n == new: - self.cache[n] = data - if n == self.cache_first: - self.event.set() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() diff --git a/lib/btserver/dispatcher.py b/lib/btserver/dispatcher.py deleted file mode 100644 index c0edfa01..00000000 --- a/lib/btserver/dispatcher.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- - -from monitor import Monitor - -import traceback - -try: - import xbmc, xbmcgui -except: - pass - -from platformcode import config -LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default='') - -from servers import torrent as torr -lt, e, e1, e2 = torr.import_libtorrent(LIBTORRENT_PATH) - - -class Dispatcher(Monitor): - def __init__(self, client): - super(Dispatcher, self).__init__(client) - - def do_start(self, th, ses): - self._th = th - self._ses = ses - 
self.start() - - def run(self): - if not self._ses: - raise Exception('Invalid state, session is not initialized') - - while self.running: - a = self._ses.wait_for_alert(1000) - if a: - alerts = self._ses.pop_alerts() - for alert in alerts: - with self.lock: - for cb in self.listeners: - cb(lt.alert.what(alert), alert) diff --git a/lib/btserver/file.py b/lib/btserver/file.py deleted file mode 100644 index 0d8d9742..00000000 --- a/lib/btserver/file.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- - -import os - -from cursor import Cursor - - -class File(object): - def __init__(self, path, base, index, size, fmap, piece_size, client): - self._client = client - self.path = path - self.base = base - self.index = index - self.size = size - - self.piece_size = piece_size - - self.full_path = os.path.join(base, path) - self.first_piece = fmap.piece - self.offset = fmap.start - self.last_piece = self.first_piece + max((size - 1 + fmap.start), 0) // piece_size - - self.cursor = None - - def create_cursor(self, offset=None): - self.cursor = Cursor(self) - if offset: - self.cursor.seek(offset) - return self.cursor - - def map_piece(self, ofs): - return self.first_piece + (ofs + self.offset) // self.piece_size, (ofs + self.offset) % self.piece_size - - def update_piece(self, n, data): - if self.cursor: - self.cursor.update_piece(n, data) - - def __str__(self): - return self.path diff --git a/lib/btserver/handler.py b/lib/btserver/handler.py deleted file mode 100644 index 7ddb9705..00000000 --- a/lib/btserver/handler.py +++ /dev/null @@ -1,135 +0,0 @@ -# -*- coding: utf-8 -*- - -import BaseHTTPServer -import os -import re -import time -import types -import urllib -import urlparse - -RANGE_RE = re.compile(r'bytes=(\d+)-') - - -def parse_range(range): # @ReservedAssignment - if range: - m = RANGE_RE.match(range) - if m: - try: - return int(m.group(1)) - except: - pass - return 0 - - -class Handler(BaseHTTPServer.BaseHTTPRequestHandler): - protocol_version = 'HTTP/1.1' - 
- def log_message(self, format, *args): - pass - - def do_GET(self): - if self.server.request: - self.server.request.wfile.close() - self.server.request = self - - if self.do_HEAD(): - f = self.server.file.create_cursor(self.offset) - while f == self.server.file.cursor: - buf = f.read(1024) - if buf: - try: - self.wfile.write(buf) - except: - break - else: - break - f.close() - - def send_pls(self, files): - playlist = "[playlist]\n\n" - for x, f in enumerate(files): - playlist += "File" + str(x + 1) + "=http://127.0.0.1:" + str(self.server._client.port) + "/" + urllib.quote( - f.path) + "\n" - playlist += "Title" + str(x + 1) + "=" + f.path + "\n" - playlist += "NumberOfEntries=" + str(len(files)) - playlist += "Version=2" - self.send_response(200, 'OK') - self.send_header("Content-Length", str(len(playlist))) - self.finish_header() - self.wfile.write(playlist) - - def do_HEAD(self): - url = urlparse.urlparse(self.path).path - - '''Whait to list of files ''' - while not self.server._client.files: - time.sleep(1) - - files = self.server._client.files - self.server.file = self.server._client.file - - '''Creates PLS playlist ''' - if url == "/playlist.pls": - self.send_pls(files) - return False - - '''Change File to download ''' - if not self.server.file or urllib.unquote(url) != '/' + self.server.file.path: - file = urllib.unquote(url) - client = self.server._client - for f in client.files: - if file == '/' + f.path: - client.set_file(f) - self.server.file = client.file - break - - while not self.server._client.has_meta: - time.sleep(1) - if self.server.file and urllib.unquote(url) == '/' + self.server.file.path: - self.offset = 0 - size, mime = self._file_info() - range = parse_range(self.headers.get('Range', None)) - if range: - self.offset = range - range = (range, size - 1, size) - - self.send_resp_header(mime, size, range) - return True - - else: - self.send_error(404, 'Not Found') - - def _file_info(self): - size = self.server.file.size - ext = 
os.path.splitext(self.server.file.path)[1] - mime = self.server._client.VIDEO_EXTS.get(ext) - if not mime: - mime = 'application/octet-stream' - return size, mime - - def send_resp_header(self, cont_type, cont_length, range=False): # @ReservedAssignment - if range: - self.send_response(206, 'Partial Content') - else: - self.send_response(200, 'OK') - - self.send_header('Content-Type', cont_type) - self.send_header('transferMode.dlna.org', 'Streaming') - self.send_header('contentFeatures.dlna.org', - 'DLNA.ORG_OP=01;DLNA.ORG_CI=0;DLNA.ORG_FLAGS=01700000000000000000000000000000') - self.send_header('Accept-Ranges', 'bytes') - - if range: - if isinstance(range, (types.TupleType, types.ListType)) and len(range) == 3: - self.send_header('Content-Range', 'bytes %d-%d/%d' % range) - self.send_header('Content-Length', range[1] - range[0] + 1) - else: - raise ValueError('Invalid range value') - else: - self.send_header('Content-Length', cont_length) - self.finish_header() - - def finish_header(self): - self.send_header('Connection', 'close') - self.end_headers() diff --git a/lib/btserver/monitor.py b/lib/btserver/monitor.py deleted file mode 100644 index 9fd40c70..00000000 --- a/lib/btserver/monitor.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- - -from threading import Thread, Lock, Event - - -class Monitor(Thread): - def __init__(self, client): - Thread.__init__(self) - self.daemon = True - self.listeners = [] - self.lock = Lock() - self.wait_event = Event() - self.running = True - self.client = client - self.ses = None - self.client = client - - def stop(self): - self.running = False - self.wait_event.set() - - def add_listener(self, cb): - with self.lock: - if not cb in self.listeners: - self.listeners.append(cb) - - def remove_listener(self, cb): - with self.lock: - try: - self.listeners.remove(cb) - except ValueError: - pass - - def remove_all_listeners(self): - with self.lock: - self.listeners = [] - - def run(self): - while (self.running): - with 
self.lock: - for cb in self.listeners: - cb() - - self.wait_event.wait(1.0) diff --git a/lib/btserver/resume_data.py b/lib/btserver/resume_data.py deleted file mode 100644 index a0c9d3eb..00000000 --- a/lib/btserver/resume_data.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- - -class ResumeData(object): - def __init__(self, client): - self.data = None - self.failed = False - client._dispatcher.add_listener(self._process_alert) - client._th.save_resume_data() - - def _process_alert(self, t, alert): - if t == 'save_resume_data_failed_alert': - self.failed = True - - elif t == 'save_resume_data_alert': - self.data = alert.resume_data diff --git a/lib/btserver/server.py b/lib/btserver/server.py deleted file mode 100644 index 17afecd4..00000000 --- a/lib/btserver/server.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- - -import BaseHTTPServer -import traceback -from SocketServer import ThreadingMixIn -from threading import Thread - - -class Server(ThreadingMixIn, BaseHTTPServer.HTTPServer): - daemon_threads = True - timeout = 1 - - def __init__(self, address, handler, client): - BaseHTTPServer.HTTPServer.__init__(self, address, handler) - self._client = client - self.file = None - self.running = True - self.request = None - - def stop(self): - self.running = False - - def serve(self): - while self.running: - try: - self.handle_request() - except: - print traceback.format_exc() - - def run(self): - t = Thread(target=self.serve, name='HTTP Server') - t.daemon = True - t.start() - - def handle_error(self, request, client_address): - if not "socket.py" in traceback.format_exc(): - print traceback.format_exc() diff --git a/lib/cloudscraper/__init__.py b/lib/cloudscraper/__init__.py index 389788bf..eb957a63 100644 --- a/lib/cloudscraper/__init__.py +++ b/lib/cloudscraper/__init__.py @@ -55,7 +55,7 @@ except ImportError: # ------------------------------------------------------------------------------- # -__version__ = '1.2.35' +__version__ = '1.2.36' # 
------------------------------------------------------------------------------- # @@ -78,6 +78,7 @@ class CipherSuiteAdapter(HTTPAdapter): if not self.ssl_context: self.ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) self.ssl_context.set_ciphers(self.cipherSuite) + self.ssl_context.set_ecdh_curve('prime256v1') self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1) super(CipherSuiteAdapter, self).__init__(**kwargs) @@ -256,7 +257,7 @@ class CloudScraper(Session): resp.headers.get('Server', '').startswith('cloudflare') and resp.status_code in [429, 503] and re.search( - r'<form id="challenge-form" action="/.*?__cf_chl_jschl_tk__=\S+"', + r'<form .*?="challenge-form" action="/.*?__cf_chl_jschl_tk__=\S+"', resp.text, re.M | re.DOTALL ) @@ -347,6 +348,8 @@ class CloudScraper(Session): ) if self.is_reCaptcha_Challenge(resp) or self.is_IUAM_Challenge(resp): + if self.debug: + print('Detected Challenge.') return True return False @@ -358,7 +361,8 @@ class CloudScraper(Session): def IUAM_Challenge_Response(self, body, url, interpreter): try: formPayload = re.search( - r'<form (?P<form>id="challenge-form" action="(?P<challengeUUID>.*?' + r'<form (?P<form>.*?="challenge-form" ' + r'action="(?P<challengeUUID>.*?' 
r'__cf_chl_jschl_tk__=\S+)"(.*?)</form>)', body, re.M | re.DOTALL @@ -412,7 +416,7 @@ class CloudScraper(Session): def reCaptcha_Challenge_Response(self, provider, provider_params, body, url): try: formPayload = re.search( - r'<form class="challenge-form" (?P<form>id="challenge-form" ' + r'<form (?P<form>.*?="challenge-form" ' r'action="(?P<challengeUUID>.*?__cf_chl_captcha_tk__=\S+)"(.*?)</form>)', body, re.M | re.DOTALL diff --git a/lib/cloudscraper/interpreters/encapsulated.py b/lib/cloudscraper/interpreters/encapsulated.py index c3a4a0c5..98faf48f 100644 --- a/lib/cloudscraper/interpreters/encapsulated.py +++ b/lib/cloudscraper/interpreters/encapsulated.py @@ -9,15 +9,13 @@ def template(body, domain): try: js = re.search( - r'setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n', - body + r'setTimeout\(function\(\){\s+(.*?a\.value = \S+)', + body, + re.M | re.S ).group(1) except Exception: raise ValueError('Unable to identify Cloudflare IUAM Javascript on website. {}'.format(BUG_REPORT)) - js = re.sub(r'\s{2,}', ' ', js, flags=re.MULTILINE | re.DOTALL).replace('\'; 121\'', '') - js += '\na.value;' - jsEnv = ''' String.prototype.italics=function(str) {{return "<i>" + this + "</i>";}}; var document = {{ diff --git a/lib/cloudscraper/interpreters/native.py b/lib/cloudscraper/interpreters/native.py index 69828705..94d238bb 100644 --- a/lib/cloudscraper/interpreters/native.py +++ b/lib/cloudscraper/interpreters/native.py @@ -1,21 +1,95 @@ from __future__ import absolute_import +import ast import re import operator as op +import pyparsing from ..exceptions import CloudflareSolveError from . 
import JavaScriptInterpreter # ------------------------------------------------------------------------------- # +_OP_MAP = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.Invert: op.neg, +} + +# ------------------------------------------------------------------------------- # + + +class Calc(ast.NodeVisitor): + + def visit_BinOp(self, node): + return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right)) + + # ------------------------------------------------------------------------------- # + + def visit_Num(self, node): + return node.n + + # ------------------------------------------------------------------------------- # + + def visit_Expr(self, node): + return self.visit(node.value) + + # ------------------------------------------------------------------------------- # + + @classmethod + def doMath(cls, expression): + tree = ast.parse(expression) + calc = cls() + return calc.visit(tree.body[0]) + +# ------------------------------------------------------------------------------- # + + +class Parentheses(object): + + def fix(self, s): + res = [] + self.visited = set([s]) + self.dfs(s, self.invalid(s), res) + return res + + # ------------------------------------------------------------------------------- # + + def dfs(self, s, n, res): + if n == 0: + res.append(s) + return + for i in range(len(s)): + if s[i] in ['(', ')']: + s_new = s[:i] + s[i + 1:] + if s_new not in self.visited and self.invalid(s_new) < n: + self.visited.add(s_new) + self.dfs(s_new, self.invalid(s_new), res) + + # ------------------------------------------------------------------------------- # + + def invalid(self, s): + plus = minus = 0 + memo = {"(": 1, ")": -1} + for c in s: + plus += memo.get(c, 0) + minus += 1 if plus < 0 else 0 + plus = max(0, plus) + return plus + minus + +# ------------------------------------------------------------------------------- # + class ChallengeInterpreter(JavaScriptInterpreter): def __init__(self): 
super(ChallengeInterpreter, self).__init__('native') + # ------------------------------------------------------------------------------- # + def eval(self, body, domain): - # ------------------------------------------------------------------------------- # operators = { '+': op.add, @@ -26,18 +100,29 @@ class ChallengeInterpreter(JavaScriptInterpreter): # ------------------------------------------------------------------------------- # + def flatten(l): + return sum(map(flatten, l), []) if isinstance(l, list) else [l] + + # ------------------------------------------------------------------------------- # + def jsfuckToNumber(jsFuck): - t = '' + # "Clean Up" JSFuck + jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0') + jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '') + jsFuck = Parentheses().fix(jsFuck)[0] - split_numbers = re.compile(r'-?\d+').findall + # Hackery Parser for Math + stack = [] + bstack = [] + for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()): + if i == '+': + stack.append(bstack) + bstack = [] + continue + bstack.append(i) + stack.append(bstack) - for i in re.findall( - r'\((?:\d|\+|\-)*\)', - jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0').lstrip('+').replace('(+', '(') - ): - t = '{}{}'.format(t, sum(int(x) for x in split_numbers(i))) - - return int(t) + return int(''.join([str(Calc.doMath(''.join(i))) for i in stack])) # ------------------------------------------------------------------------------- # @@ -45,6 +130,7 @@ class ChallengeInterpreter(JavaScriptInterpreter): jsfuckMath = payload.split('/') if needle in jsfuckMath[1]: expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0] + expression_value = operators[expression[1]]( float(jsfuckToNumber(expression[0])), float(ord(domain[jsfuckToNumber(jsfuckMath[1][ @@ -118,7 +204,7 @@ class ChallengeInterpreter(JavaScriptInterpreter): return challengeSolve(body, domain) - # 
------------------------------------------------------------------------------- # + ChallengeInterpreter() diff --git a/lib/dateutil/__init__.py b/lib/dateutil/__init__.py new file mode 100644 index 00000000..9a42dcea --- /dev/null +++ b/lib/dateutil/__init__.py @@ -0,0 +1,9 @@ +""" +Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ +datetime module. +""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" +__version__ = "1.5.0.1" diff --git a/lib/dateutil/easter.py b/lib/dateutil/easter.py new file mode 100644 index 00000000..d7944104 --- /dev/null +++ b/lib/dateutil/easter.py @@ -0,0 +1,92 @@ +""" +Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ +datetime module. +""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + EASTER_JULIAN = 1 + EASTER_ORTHODOX = 2 + EASTER_WESTERN = 3 + + The default method is method 3. 
+ + More about the algorithm may be found at: + + http://users.chariot.net.au/~gmarts/eastalg.htm + + and + + http://www.tondering.dk/claus/calendar.html + + """ + + if not (1 <= method <= 3): + raise ValueError, "invalid method" + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g+15)%30 + j = (y+y//4+i)%7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e+y//100-16-(y//100-16)//4 + else: + # New method + c = y//100 + h = (c-c//4-(8*c+13)//25+19*g+15)%30 + i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11)) + j = (y+y//4+i+2-c+c//4)%7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i-j+e + d = 1+(p+27+(p+6)//40)%31 + m = 3+(p+26)//30 + return datetime.date(int(y),int(m),int(d)) + diff --git a/lib/dateutil/parser.py b/lib/dateutil/parser.py new file mode 100644 index 00000000..5d824e41 --- /dev/null +++ b/lib/dateutil/parser.py @@ -0,0 +1,886 @@ +# -*- coding:iso-8859-1 -*- +""" +Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +import datetime +import string +import time +import sys +import os + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +import relativedelta +import tz + + +__all__ = ["parse", "parserinfo"] + + +# Some pointers: +# +# http://www.cl.cam.ac.uk/~mgk25/iso-time.html +# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html +# http://www.w3.org/TR/NOTE-datetime +# http://ringmaster.arc.nasa.gov/tools/time_formats.html +# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm +# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html + + +class _timelex(object): + + def __init__(self, instream): + if isinstance(instream, basestring): + instream = StringIO(instream) + self.instream = instream + self.wordchars = ('abcdfeghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_' + '' + '') + self.numchars = '0123456789' + self.whitespace = ' \t\r\n' + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + if self.tokenstack: + return self.tokenstack.pop(0) + seenletters = False + token = None + state = None + wordchars = self.wordchars + numchars = self.numchars + whitespace = self.whitespace + while not self.eof: + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + if not nextchar: + self.eof = True + break + elif not state: + token = nextchar + if nextchar in wordchars: + state = 'a' + elif nextchar in numchars: + state = '0' + elif nextchar in whitespace: + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + seenletters = True + if nextchar in wordchars: + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' 
+ else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + if nextchar in numchars: + token += nextchar + elif nextchar == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + seenletters = True + if nextchar == '.' or nextchar in wordchars: + token += nextchar + elif nextchar in numchars and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + if nextchar == '.' or nextchar in numchars: + token += nextchar + elif nextchar in wordchars and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + if (state in ('a.', '0.') and + (seenletters or token.count('.') > 1 or token[-1] == '.')): + l = token.split('.') + token = l[0] + for tok in l[1:]: + self.tokenstack.append('.') + if tok: + self.tokenstack.append(tok) + return token + + def __iter__(self): + return self + + def next(self): + token = self.get_token() + if token is None: + raise StopIteration + return token + + def split(cls, s): + return list(cls(s)) + split = classmethod(split) + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, `value`)) + return "%s(%s)" % (classname, ", ".join(l)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), + ("Wed", "Wednesday"), + ("Thu", "Thursday"), + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", 
"February"), + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year//100*100 + + def _convert(self, lst): + dct = {} + for i in range(len(lst)): + v = lst[i] + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + if len(name) >= 3: + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + if len(name) >= 3: + try: + return self._months[name.lower()]+1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + return self.TZOFFSET.get(name) + + def convertyear(self, year): + if year < 100: + year += self._century + if abs(year-self._year) >= 50: + if year < self._year: 
+ year += 100 + else: + year -= 100 + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year) + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class parser(object): + + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, + ignoretz=False, tzinfos=None, + **kwargs): + if not default: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + res = self._parse(timestr, **kwargs) + if res is None: + raise ValueError, "unknown string format" + repl = {} + for attr in ["year", "month", "day", "hour", + "minute", "second", "microsecond"]: + value = getattr(res, attr) + if value is not None: + repl[attr] = value + ret = default.replace(**repl) + if res.weekday is not None and not res.day: + ret = ret+relativedelta.relativedelta(weekday=res.weekday) + if not ignoretz: + if callable(tzinfos) or tzinfos and res.tzname in tzinfos: + if callable(tzinfos): + tzdata = tzinfos(res.tzname, res.tzoffset) + else: + tzdata = tzinfos.get(res.tzname) + if isinstance(tzdata, datetime.tzinfo): + tzinfo = tzdata + elif isinstance(tzdata, basestring): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, int): + tzinfo = tz.tzoffset(res.tzname, tzdata) + else: + raise ValueError, "offset must be tzinfo subclass, " \ + "tz string, or int offset" + ret = ret.replace(tzinfo=tzinfo) + elif res.tzname and res.tzname in time.tzname: + ret = ret.replace(tzinfo=tz.tzlocal()) + elif res.tzoffset == 0: + ret = ret.replace(tzinfo=tz.tzutc()) + elif res.tzoffset: + ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", 
"tzoffset"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False): + info = self.info + if dayfirst is None: + dayfirst = info.dayfirst + if yearfirst is None: + yearfirst = info.yearfirst + res = self._result() + l = _timelex.split(timestr) + try: + + # year/month/day list + ymd = [] + + # Index of the month string in ymd + mstridx = -1 + + len_l = len(l) + i = 0 + while i < len_l: + + # Check if it's a number + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Token is a number + len_li = len(l[i]) + i += 1 + if (len(ymd) == 3 and len_li in (2, 4) + and (i >= len_l or (l[i] != ':' and + info.hms(l[i]) is None))): + # 19990101T23[59] + s = l[i-1] + res.hour = int(s[:2]) + if len_li == 4: + res.minute = int(s[2:]) + elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = l[i-1] + if not ymd and l[i-1].find('.') == -1: + ymd.append(info.convertyear(int(s[:2]))) + ymd.append(int(s[2:4])) + ymd.append(int(s[4:])) + else: + # 19990101T235959[.59] + res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = _parsems(s[4:]) + elif len_li == 8: + # YYYYMMDD + s = l[i-1] + ymd.append(int(s[:4])) + ymd.append(int(s[4:6])) + ymd.append(int(s[6:])) + elif len_li in (12, 14): + # YYYYMMDDhhmm[ss] + s = l[i-1] + ymd.append(int(s[:4])) + ymd.append(int(s[4:6])) + ymd.append(int(s[6:8])) + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + if len_li == 14: + res.second = int(s[12:]) + elif ((i < len_l and info.hms(l[i]) is not None) or + (i+1 < len_l and l[i] == ' ' and + info.hms(l[i+1]) is not None)): + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + if l[i] == ' ': + i += 1 + idx = info.hms(l[i]) + while True: + if idx == 0: + res.hour = int(value) + if value%1: + res.minute = int(60*(value%1)) + elif idx == 1: + res.minute = int(value) + if value%1: + res.second = int(60*(value%1)) + elif idx == 2: + res.second, res.microsecond = \ + 
_parsems(value_repr) + i += 1 + if i >= len_l or idx == 2: + break + # 12h00 + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + break + else: + i += 1 + idx += 1 + if i < len_l: + newidx = info.hms(l[i]) + if newidx is not None: + idx = newidx + elif i+1 < len_l and l[i] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + i += 1 + value = float(l[i]) + res.minute = int(value) + if value%1: + res.second = int(60*(value%1)) + i += 1 + if i < len_l and l[i] == ':': + res.second, res.microsecond = _parsems(l[i+1]) + i += 2 + elif i < len_l and l[i] in ('-', '/', '.'): + sep = l[i] + ymd.append(int(value)) + i += 1 + if i < len_l and not info.jump(l[i]): + try: + # 01-01[-01] + ymd.append(int(l[i])) + except ValueError: + # 01-Jan[-01] + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + else: + return None + i += 1 + if i < len_l and l[i] == sep: + # We have three members + i += 1 + value = info.month(l[i]) + if value is not None: + ymd.append(value) + mstridx = len(ymd)-1 + assert mstridx == -1 + else: + ymd.append(int(l[i])) + i += 1 + elif i >= len_l or info.jump(l[i]): + if i+1 < len_l and info.ampm(l[i+1]) is not None: + # 12 am + res.hour = int(value) + if res.hour < 12 and info.ampm(l[i+1]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i+1]) == 0: + res.hour = 0 + i += 1 + else: + # Year, month or day + ymd.append(int(value)) + i += 1 + elif info.ampm(l[i]) is not None: + # 12am + res.hour = int(value) + if res.hour < 12 and info.ampm(l[i]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i]) == 0: + res.hour = 0 + i += 1 + elif not fuzzy: + return None + else: + i += 1 + continue + + # Check weekday + value = info.weekday(l[i]) + if value is not None: + res.weekday = value + i += 1 + continue + + # Check month name + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + i += 1 + 
if i < len_l: + if l[i] in ('-', '/'): + # Jan-01[-99] + sep = l[i] + i += 1 + ymd.append(int(l[i])) + i += 1 + if i < len_l and l[i] == sep: + # Jan-01-99 + i += 1 + ymd.append(int(l[i])) + i += 1 + elif (i+3 < len_l and l[i] == l[i+2] == ' ' + and info.pertain(l[i+1])): + # Jan of 01 + # In this case, 01 is clearly year + try: + value = int(l[i+3]) + except ValueError: + # Wrong guess + pass + else: + # Convert it here to become unambiguous + ymd.append(info.convertyear(value)) + i += 4 + continue + + # Check am/pm + value = info.ampm(l[i]) + if value is not None: + if value == 1 and res.hour < 12: + res.hour += 12 + elif value == 0 and res.hour == 12: + res.hour = 0 + i += 1 + continue + + # Check for a timezone name + if (res.hour is not None and len(l[i]) <= 5 and + res.tzname is None and res.tzoffset is None and + not [x for x in l[i] if x not in string.ascii_uppercase]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + i += 1 + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i < len_l and l[i] in ('+', '-'): + l[i] = ('+', '-')[l[i] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. 
+ res.tzname = None + + continue + + # Check for a numbered timezone + if res.hour is not None and l[i] in ('+', '-'): + signal = (-1,1)[l[i] == '+'] + i += 1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60 + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + res.tzoffset = int(l[i])*3600+int(l[i+2])*60 + i += 2 + elif len_li <= 2: + # -[0]3 + res.tzoffset = int(l[i][:2])*3600 + else: + return None + i += 1 + res.tzoffset *= signal + + # Look for a timezone name between parenthesis + if (i+3 < len_l and + info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and + 3 <= len(l[i+2]) <= 5 and + not [x for x in l[i+2] + if x not in string.ascii_uppercase]): + # -0300 (BRST) + res.tzname = l[i+2] + i += 4 + continue + + # Check jumps + if not (info.jump(l[i]) or fuzzy): + return None + + i += 1 + + # Process year/month/day + len_ymd = len(ymd) + if len_ymd > 3: + # More than three members!? + return None + elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): + # One member, or two members with a month string + if mstridx != -1: + res.month = ymd[mstridx] + del ymd[mstridx] + if len_ymd > 1 or mstridx == -1: + if ymd[0] > 31: + res.year = ymd[0] + else: + res.day = ymd[0] + elif len_ymd == 2: + # Two members with numbers + if ymd[0] > 31: + # 99-01 + res.year, res.month = ymd + elif ymd[1] > 31: + # 01-99 + res.month, res.year = ymd + elif dayfirst and ymd[1] <= 12: + # 13-01 + res.day, res.month = ymd + else: + # 01-13 + res.month, res.day = ymd + if len_ymd == 3: + # Three members + if mstridx == 0: + res.month, res.day, res.year = ymd + elif mstridx == 1: + if ymd[0] > 31 or (yearfirst and ymd[2] <= 31): + # 99-Jan-01 + res.year, res.month, res.day = ymd + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. + res.day, res.month, res.year = ymd + elif mstridx == 2: + # WTF!? 
+ if ymd[1] > 31: + # 01-99-Jan + res.day, res.year, res.month = ymd + else: + # 99-01-Jan + res.year, res.day, res.month = ymd + else: + if ymd[0] > 31 or \ + (yearfirst and ymd[1] <= 12 and ymd[2] <= 31): + # 99-01-01 + res.year, res.month, res.day = ymd + elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12): + # 13-01-01 + res.day, res.month, res.year = ymd + else: + # 01-13-01 + res.month, res.day, res.year = ymd + + except (IndexError, ValueError, AssertionError): + return None + + if not info.validate(res): + return None + return res + +DEFAULTPARSER = parser() +def parse(timestr, parserinfo=None, **kwargs): + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = _timelex.split(tzstr) + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + i = j + if (i < len_l and + (l[i] in ('+', '-') or l[i][0] in "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. 
+ signal = (1,-1)[l[i] == '+'] + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, + (int(l[i][:2])*3600+int(l[i][2:])*60)*signal) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i])*3600+int(l[i+2])*60)*signal) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2])*3600*signal) + else: + return None + i += 1 + if res.dstabbr: + break + else: + break + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + i += 2 + if l[i] == '-': + value = int(l[i+1])*-1 + i += 1 + else: + value = int(l[i]) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i])-1)%7 + else: + x.day = int(l[i]) + i += 2 + x.time = int(l[i]) + i += 2 + if i < len_l: + if l[i] in ('-','+'): + signal = (-1,1)[l[i] == "+"] + i += 1 + else: + signal = 1 + res.dstoffset = (res.stdoffset+int(l[i]))*signal + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',','/','J','M', + '.','-',':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + i += 1 + x.month = int(l[i]) + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.weekday = (int(l[i])-1)%7 + else: + # year day (zero based) + x.yday = int(l[i])+1 + + i += 1 + + if i < len_l and l[i] == '/': + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + 
x.time = int(l[i])*3600+int(l[i+2])*60 + i += 2 + if i+1 < len_l and l[i+1] == ':': + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2])*3600) + else: + return None + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + return res + + +DEFAULTTZPARSER = _tzparser() +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + + +def _parsems(value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + +# vim:ts=4:sw=4:et diff --git a/lib/dateutil/relativedelta.py b/lib/dateutil/relativedelta.py new file mode 100644 index 00000000..9735819b --- /dev/null +++ b/lib/dateutil/relativedelta.py @@ -0,0 +1,432 @@ +""" +Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ +datetime module. +""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +import datetime +import calendar + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + +class relativedelta: + """ +The relativedelta type is based on the specification of the excelent +work done by M.-A. 
Lemburg in his mx.DateTime extension. However, +notice that this type does *NOT* implement the same algorithm as +his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + +There's two different ways to build a relativedelta instance. The +first one is passing it two date/datetime classes: + + relativedelta(datetime1, datetime2) + +And the other way is to use the following keyword arguments: + + year, month, day, hour, minute, second, microsecond: + Absolute information. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative. + + weekday: + One of the weekday instances (MO, TU, etc). These instances may + receive a parameter N, specifying the Nth weekday, which could + be positive or negative (like MO(+1) or MO(-2). Not specifying + it is the same as specifying +1. You can also use an integer, + where 0=MO. + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + +Here is the behavior of operations with relativedelta: + +1) Calculate the absolute year, using the 'year' argument, or the + original datetime year, if the argument is not present. + +2) Add the relative 'years' argument to the absolute year. + +3) Do steps 1 and 2 for month/months. + +4) Calculate the absolute day, using the 'day' argument, or the + original datetime day, if the argument is not present. Then, + subtract from the day until it fits in the year and month + found after their operations. + +5) Add the relative 'days' argument to the absolute day. Notice + that the 'weeks' argument is multiplied by 7 and added to + 'days'. + +6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, + microsecond/microseconds. 
+ +7) If the 'weekday' argument is present, calculate the weekday, + with the given (wday, nth) tuple. wday is the index of the + weekday (0-6, 0=Mon), and nth is the number of weeks to add + forward or backward, depending on its signal. Notice that if + the calculated date is already Monday, for example, using + (0, 1) or (0, -1) won't change the day. + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + if dt1 and dt2: + if not isinstance(dt1, datetime.date) or \ + not isinstance(dt2, datetime.date): + raise TypeError, "relativedelta only diffs datetime/date" + if type(dt1) is not type(dt2): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month) + self._set_months(months) + dtm = self.__radd__(dt2) + if dt1 < dt2: + while dt1 > dtm: + months += 1 + self._set_months(months) + dtm = self.__radd__(dt2) + else: + while dt1 < dtm: + months -= 1 + self._set_months(months) + dtm = self.__radd__(dt2) + delta = dt1 - dtm + self.seconds = delta.seconds+delta.days*86400 + self.microseconds = delta.microseconds + else: + self.years = years + self.months = months + self.days = days+weeks*7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = 
microseconds + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if type(weekday) is int: + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError, "invalid year day (%d)" % yday + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = self.microseconds//abs(self.microseconds) + div, mod = divmod(self.microseconds*s, 1000000) + self.microseconds = mod*s + self.seconds += div*s + if abs(self.seconds) > 59: + s = self.seconds//abs(self.seconds) + div, mod = divmod(self.seconds*s, 60) + self.seconds = mod*s + self.minutes += div*s + if abs(self.minutes) > 59: + s = self.minutes//abs(self.minutes) + div, mod = divmod(self.minutes*s, 60) + self.minutes = mod*s + self.hours += div*s + if abs(self.hours) > 23: + s = self.hours//abs(self.hours) + div, mod = divmod(self.hours*s, 24) + self.hours = mod*s + self.days += div*s + if abs(self.months) > 11: + s = self.months//abs(self.months) + div, mod = divmod(self.months*s, 12) + self.months = mod*s + self.years += div*s + if (self.hours or self.minutes or self.seconds or self.microseconds or + self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = self.months//abs(self.months) + div, mod = divmod(self.months*s, 12) + self.months = mod*s + self.years = div*s + else: + self.years = 0 + + def __radd__(self, other): + if 
not isinstance(other, datetime.date): + raise TypeError, "unsupported type for add operation" + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth)-1)*7 + if nth > 0: + jumpdays += (7-ret.weekday()+weekday)%7 + else: + jumpdays += (ret.weekday()-weekday)%7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __add__(self, other): + if not isinstance(other, relativedelta): + raise TypeError, "unsupported type for add operation" + return relativedelta(years=other.years+self.years, + months=other.months+self.months, + days=other.days+self.days, + hours=other.hours+self.hours, + minutes=other.minutes+self.minutes, + seconds=other.seconds+self.seconds, + microseconds=other.microseconds+self.microseconds, + leapdays=other.leapdays or self.leapdays, + year=other.year or self.year, + month=other.month or self.month, + day=other.day or self.day, + weekday=other.weekday or self.weekday, + hour=other.hour or self.hour, + minute=other.minute or 
self.minute, + second=other.second or self.second, + microsecond=other.second or self.microsecond) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + raise TypeError, "unsupported type for sub operation" + return relativedelta(years=other.years-self.years, + months=other.months-self.months, + days=other.days-self.days, + hours=other.hours-self.hours, + minutes=other.minutes-self.minutes, + seconds=other.seconds-self.seconds, + microseconds=other.microseconds-self.microseconds, + leapdays=other.leapdays or self.leapdays, + year=other.year or self.year, + month=other.month or self.month, + day=other.day or self.day, + weekday=other.weekday or self.weekday, + hour=other.hour or self.hour, + minute=other.minute or self.minute, + second=other.second or self.second, + microsecond=other.second or self.microsecond) + + def __neg__(self): + return relativedelta(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __nonzero__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + + def __mul__(self, other): + f = float(other) + return relativedelta(years = int(round(self.years*f)), + months = int(round(self.months*f)), + days = int(round(self.days*f)), + hours = int(round(self.hours*f)), + minutes = int(round(self.minutes*f)), + seconds = int(round(self.seconds*f)), + microseconds = 
self.microseconds*f, + leapdays = self.leapdays, + year = self.year, + month = self.month, + day = self.day, + weekday = self.weekday, + hour = self.hour, + minute = self.minute, + second = self.second, + microsecond = self.microsecond) + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return False + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + return self.__mul__(1/float(other)) + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("%s=%+d" % (attr, value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, `value`)) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + +# vim:ts=4:sw=4:et diff --git a/lib/dateutil/rrule.py b/lib/dateutil/rrule.py new file mode 100644 index 00000000..34722dff --- /dev/null +++ b/lib/dateutil/rrule.py @@ -0,0 +1,1108 @@ +""" +Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ 
+datetime module. +""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +import itertools +import datetime +import calendar +import thread +import heapq +import sys + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+ + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = range(1,30), range(1,31), range(1,32) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366) +M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365) +WDAYMASK = [0,1,2,3,4,5,6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = range(7) + +# Imported on demand. 
+easter = None +parser = None + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + if n == 0: + raise ValueError, "Can't create weekday with n == 0" + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + +class rrulebase: + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = thread.allocate_lock() + self._cache_gen = self._iter() + self._cache_complete = False + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(gen.next()) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxint, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i 
in range(item+1): + res = gen.next() + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penality. + def count(self): + if self._len is None: + for x in self: pass + return self._len + + def before(self, dt, inc=False): + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def between(self, after, before, inc=False): + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + +class rrule(rrulebase): + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + rrulebase.__init__(self, cache) + global easter + if not dtstart: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = 
dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + if wkst is None: + self._wkst = calendar.firstweekday() + elif type(wkst) is int: + self._wkst = wkst + else: + self._wkst = wkst.weekday + if bysetpos is None: + self._bysetpos = None + elif type(bysetpos) is int: + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + if not (byweekno or byyearday or bymonthday or + byweekday is not None or byeaster is not None): + if freq == YEARLY: + if not bymonth: + bymonth = dtstart.month + bymonthday = dtstart.day + elif freq == MONTHLY: + bymonthday = dtstart.day + elif freq == WEEKLY: + byweekday = dtstart.weekday() + # bymonth + if not bymonth: + self._bymonth = None + elif type(bymonth) is int: + self._bymonth = (bymonth,) + else: + self._bymonth = tuple(bymonth) + # byyearday + if not byyearday: + self._byyearday = None + elif type(byyearday) is int: + self._byyearday = (byyearday,) + else: + self._byyearday = tuple(byyearday) + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if type(byeaster) is int: + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(byeaster) + else: + self._byeaster = None + # bymonthay + if not bymonthday: + self._bymonthday = () + self._bynmonthday = () + elif type(bymonthday) is int: + if bymonthday < 0: + self._bynmonthday = (bymonthday,) + self._bymonthday = () + else: + self._bymonthday = (bymonthday,) + self._bynmonthday = () + else: + self._bymonthday = tuple([x for x in bymonthday if x > 0]) + 
self._bynmonthday = tuple([x for x in bymonthday if x < 0]) + # byweekno + if byweekno is None: + self._byweekno = None + elif type(byweekno) is int: + self._byweekno = (byweekno,) + else: + self._byweekno = tuple(byweekno) + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + elif type(byweekday) is int: + self._byweekday = (byweekday,) + self._bynweekday = None + elif hasattr(byweekday, "n"): + if not byweekday.n or freq > MONTHLY: + self._byweekday = (byweekday.weekday,) + self._bynweekday = None + else: + self._bynweekday = ((byweekday.weekday, byweekday.n),) + self._byweekday = None + else: + self._byweekday = [] + self._bynweekday = [] + for wday in byweekday: + if type(wday) is int: + self._byweekday.append(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.append(wday.weekday) + else: + self._bynweekday.append((wday.weekday, wday.n)) + self._byweekday = tuple(self._byweekday) + self._bynweekday = tuple(self._bynweekday) + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = (dtstart.hour,) + else: + self._byhour = None + elif type(byhour) is int: + self._byhour = (byhour,) + else: + self._byhour = tuple(byhour) + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = (dtstart.minute,) + else: + self._byminute = None + elif type(byminute) is int: + self._byminute = (byminute,) + else: + self._byminute = tuple(byminute) + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = (dtstart.second,) + else: + self._bysecond = None + elif type(bysecond) is int: + self._bysecond = (bysecond,) + else: + self._bysecond = tuple(bysecond) + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, 
minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY:ii.ydayset, + MONTHLY:ii.mdayset, + WEEKLY:ii.wdayset, + DAILY:ii.ddayset, + HOURLY:ii.ddayset, + MINUTELY:ii.ddayset, + SECONDLY:ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY:ii.htimeset, + MINUTELY:ii.mtimeset, + SECONDLY:ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday + and -ii.yearlen+i not in byyearday) 
or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday + and -ii.nextyearlen+i-ii.yearlen + not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal+i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + 
while True: + hour += interval + div, mod = divmod(hour, 24) + if div: + hour = mod + day += div + fixday = True + if not byhour or hour in byhour: + break + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + while True: + minute += interval + div, mod = divmod(minute, 60) + if div: + minute = mod + hour += div + div, mod = divmod(hour, 24) + if div: + hour = mod + day += div + fixday = True + filtered = False + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute)): + break + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399-(hour*3600+minute*60+second)) + //interval)*interval) + while True: + second += self._interval + div, mod = divmod(second, 60) + if div: + second = mod + minute += div + div, mod = divmod(minute, 60) + if div: + minute = mod + hour += div + div, mod = divmod(hour, 24) + if div: + hour = mod + day += div + fixday = True + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + break + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, 
month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365+calendar.isleap(year) + self.nextyearlen = 365+calendar.isleap(year+1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + #no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. 
If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. + if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1,1,1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst)%7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen+ + (lyearweekday-rr._wkst)%7)%7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst)%7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and + (month != self.lastmonth or year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday)%7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday)%7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return range(self.yearlen), 0, self.yearlen + + def mdayset(self, year, month, day): + set = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + set[i] = i + return set, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. 
+ set = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + set[i] = i + i += 1 + #if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return set, start, i + + def ddayset(self, year, month, day): + set = [None]*self.yearlen + i = datetime.date(year, month, day).toordinal()-self.yearordinal + set[i] = i + return set, i, i+1 + + def htimeset(self, hour, minute, second): + set = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + set.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + set.sort() + return set + + def mtimeset(self, hour, minute, second): + set = [] + rr = self.rrule + for second in rr._bysecond: + set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + set.sort() + return set + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + + class _genitem: + def __init__(self, genlist, gen): + try: + self.dt = gen() + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def next(self): + try: + self.dt = self.gen() + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + def __cmp__(self, other): + return cmp(self.dt, other.dt) + + def __init__(self, cache=False): + rrulebase.__init__(self, cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + def rrule(self, rrule): + self._rrule.append(rrule) + + def rdate(self, rdate): + self._rdate.append(rdate) + + def exrule(self, exrule): + self._exrule.append(exrule) + + def exdate(self, exdate): + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + 
self._genitem(rlist, iter(self._rdate).next) + for gen in [iter(x).next for x in self._rrule]: + self._genitem(rlist, gen) + heapq.heapify(rlist) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate).next) + for gen in [iter(x).next for x in self._exrule]: + self._genitem(exlist, gen) + heapq.heapify(exlist) + lastdt = None + total = 0 + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + exitem.next() + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + ritem.next() + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + +class _rrulestr: + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + 
tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError, "invalid until date" + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg): + l = [] + for wday in value.split(','): + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: n = int(n) + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError, "unknown parameter name" + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError, "unknown parameter '%s'" % name + except (KeyError, ValueError): + raise ValueError, "invalid '%s': %s" % (name, value) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + s = s.upper() + if not s.strip(): + raise ValueError, "empty string" + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] in (" ", "\t"): + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and + (s.find(':') == -1 or s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + 
else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError, "empty property name" + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError, "unsupported RRULE parm: "+parm + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError, "unsupported RDATE parm: "+parm + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError, "unsupported EXRULE parm: "+parm + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError, "unsupported RDATE parm: "+parm + exdatevals.append(value) + elif name == "DTSTART": + for parm in parms: + raise ValueError, "unsupported DTSTART parm: "+parm + if not parser: + from dateutil import parser + dtstart = parser.parse(value, ignoretz=ignoretz, + tzinfos=tzinfos) + elif name.upper().startswith('X-'): + # Ignore experimental properties. 
+ pass + else: + raise ValueError, "unsupported property: "+name + if (forceset or len(rrulevals) > 1 or + rdatevals or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + set = rruleset(cache=cache) + for value in rrulevals: + set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + set.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + for datestr in value.split(','): + set.exdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + if compatible and dtstart: + set.rdate(dtstart) + return set + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/lib/dateutil/tz.py b/lib/dateutil/tz.py new file mode 100644 index 00000000..2c15a78e --- /dev/null +++ b/lib/dateutil/tz.py @@ -0,0 +1,958 @@ +""" +Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +import datetime +import struct +import time +import sys +import os + +relativedelta = None +parser = None +rrule = None + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"] + +try: + from dateutil.tzwin import tzwin, tzwinlocal +except (ImportError, OSError): + tzwin, tzwinlocal = None, None + +ZERO = datetime.timedelta(0) +EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal() + +class tzutc(datetime.tzinfo): + + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def __eq__(self, other): + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + +class tzoffset(datetime.tzinfo): + + def __init__(self, name, offset): + self._name = name + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + def tzname(self, dt): + return self._name + + def __eq__(self, other): + return (isinstance(other, tzoffset) and + self._offset == other._offset) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + `self._name`, + self._offset.days*86400+self._offset.seconds) + + __reduce__ = object.__reduce__ + +class tzlocal(datetime.tzinfo): + + _std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + _dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + _dst_offset = _std_offset + + def utcoffset(self, dt): + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if self._isdst(dt): + return 
self._dst_offset-self._std_offset + else: + return ZERO + + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def _isdst(self, dt): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. + # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + #>>> import tz, datetime + #>>> t = tz.tzlocal() + #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + #'BRDT' + #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + #'BRST' + #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + #'BRST' + #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + #'BRDT' + #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + #'BRDT' + # + # Here is a more stable implementation: + # + timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + + dt.hour * 3600 + + dt.minute * 60 + + dt.second) + return time.localtime(timestamp+time.timezone).tm_isdst + + def __eq__(self, other): + if not isinstance(other, tzlocal): + return False + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, `value`)) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return False + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst 
and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt) + + def __ne__(self, other): + return not self.__eq__(other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + +class tzfile(datetime.tzinfo): + + # http://www.twinsun.com/tz/tz-link.htm + # ftp://elsie.nci.nih.gov/pub/tz*.tar.gz + + def __init__(self, fileobj): + if isinstance(fileobj, basestring): + self._filename = fileobj + fileobj = open(fileobj) + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = `fileobj` + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + + if fileobj.read(4) != "TZif": + raise ValueError, "magic not found" + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). + typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. 
+ # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + self._trans_list = struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4)) + else: + self._trans_list = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + self._trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + self._trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt) + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. + # The pairs of values are sorted in ascending order + # by time. 
+ + # Not used, for now + if leapcnt: + leap = struct.unpack(">%dl" % (leapcnt*2), + fileobj.read(leapcnt*8)) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # ** Everything has been read ** + + # Build ttinfo list + self._ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. + gmtoff = (gmtoff+30)//60*60 + tti = _ttinfo() + tti.offset = gmtoff + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + self._ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + trans_idx = [] + for idx in self._trans_idx: + trans_idx.append(self._ttinfo_list[idx]) + self._trans_idx = tuple(trans_idx) + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. 
+ self._ttinfo_std = None + self._ttinfo_dst = None + self._ttinfo_before = None + if self._ttinfo_list: + if not self._trans_list: + self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0] + else: + for i in range(timecnt-1,-1,-1): + tti = self._trans_idx[i] + if not self._ttinfo_std and not tti.isdst: + self._ttinfo_std = tti + elif not self._ttinfo_dst and tti.isdst: + self._ttinfo_dst = tti + if self._ttinfo_std and self._ttinfo_dst: + break + else: + if self._ttinfo_dst and not self._ttinfo_std: + self._ttinfo_std = self._ttinfo_dst + + for tti in self._ttinfo_list: + if not tti.isdst: + self._ttinfo_before = tti + break + else: + self._ttinfo_before = self._ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + laststdoffset = 0 + self._trans_list = list(self._trans_list) + for i in range(len(self._trans_list)): + tti = self._trans_idx[i] + if not tti.isdst: + # This is std time. + self._trans_list[i] += tti.offset + laststdoffset = tti.offset + else: + # This is dst time. Convert to std. 
+ self._trans_list[i] += laststdoffset + self._trans_list = tuple(self._trans_list) + + def _find_ttinfo(self, dt, laststd=0): + timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + + dt.hour * 3600 + + dt.minute * 60 + + dt.second) + idx = 0 + for trans in self._trans_list: + if timestamp < trans: + break + idx += 1 + else: + return self._ttinfo_std + if idx == 0: + return self._ttinfo_before + if laststd: + while idx > 0: + tti = self._trans_idx[idx-1] + if not tti.isdst: + return tti + idx -= 1 + else: + return self._ttinfo_std + else: + return self._trans_idx[idx-1] + + def utcoffset(self, dt): + if not self._ttinfo_std: + return ZERO + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if not self._ttinfo_dst: + return ZERO + tti = self._find_ttinfo(dt) + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. + return tti.delta-self._find_ttinfo(dt, laststd=1).delta + + # An alternative for that would be: + # + # return self._ttinfo_dst.offset-self._ttinfo_std.offset + # + # However, this class stores historical changes in the + # dst offset, so I belive that this wouldn't be the right + # way to implement this. 
+ + def tzname(self, dt): + if not self._ttinfo_std: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return False + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + def __ne__(self, other): + return not self.__eq__(other) + + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, `self._filename`) + + def __reduce__(self): + if not os.path.isfile(self._filename): + raise ValueError, "Unpickable %s class" % self.__class__.__name__ + return (self.__class__, (self._filename,)) + +class tzrange(datetime.tzinfo): + + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + global relativedelta + if not relativedelta: + from dateutil import relativedelta + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset+datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + def utcoffset(self, dt): + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if self._isdst(dt): + return self._dst_offset-self._std_offset + else: + return ZERO + + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def _isdst(self, dt): + if not 
self._start_delta: + return False + year = datetime.datetime(dt.year,1,1) + start = year+self._start_delta + end = year+self._end_delta + dt = dt.replace(tzinfo=None) + if start < end: + return dt >= start and dt < end + else: + return dt >= start or dt < end + + def __eq__(self, other): + if not isinstance(other, tzrange): + return False + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + +class tzstr(tzrange): + + def __init__(self, s): + global parser + if not parser: + from dateutil import parser + self._s = s + + res = parser._parsetz(s) + if res is None: + raise ValueError, "unknown string format" + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC"): + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. 
+ tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + def _delta(self, x, isend=0): + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. 
+ delta = self._dst_offset-self._std_offset + kwargs["seconds"] -= delta.seconds+delta.days*86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, `self._s`) + +class _tzicalvtzcomp: + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + +class _tzicalvtz(datetime.tzinfo): + def __init__(self, tzid, comps=[]): + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + dt = dt.replace(tzinfo=None) + try: + return self._cachecomp[self._cachedate.index(dt)] + except ValueError: + pass + lastcomp = None + lastcompdt = None + for comp in self._comps: + if not comp.isdst: + # Handle the extra hour in DST -> STD + compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True) + else: + compdt = comp.rrule.before(dt, inc=True) + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + self._cachedate.insert(0, dt) + self._cachecomp.insert(0, lastcomp) + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + return lastcomp + + def utcoffset(self, dt): + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "<tzicalvtz %s>" % `self._tzid` + + __reduce__ = object.__reduce__ + +class tzical: + def __init__(self, fileobj): + global rrule + if not rrule: + from dateutil import rrule + + if isinstance(fileobj, basestring): + self._s = fileobj + fileobj = open(fileobj) + elif hasattr(fileobj, "name"): + self._s = fileobj.name + else: + self._s = `fileobj` + + self._vtz = {} + + self._parse_rfc(fileobj.read()) + + def keys(self): + return self._vtz.keys() + + def get(self, tzid=None): + if tzid is None: + keys = self._vtz.keys() + if len(keys) == 0: + raise ValueError, "no timezones defined" + elif len(keys) > 1: + raise ValueError, "more than one timezone available" + tzid = keys[0] + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError, "empty offset" + if s[0] in ('+', '-'): + signal = (-1,+1)[s[0]=='+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2])*3600+int(s[2:])*60)*signal + elif len(s) == 6: + return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal + else: + raise ValueError, "invalid offset: "+s + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError, "empty string" + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] in (" ", "\t"): + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + 
comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError, "empty property name" + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError, "unknown component: "+value + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError, \ + "component not closed: "+comptype + if not tzid: + raise ValueError, \ + "mandatory TZID not found" + if not comps: + raise ValueError, \ + "at least one component is needed" + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError, \ + "mandatory DTSTART not found" + if tzoffsetfrom is None: + raise ValueError, \ + "mandatory TZOFFSETFROM not found" + if tzoffsetto is None: + raise ValueError, \ + "mandatory TZOFFSETFROM not found" + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError, \ + "invalid component end: "+value + elif comptype: + if name == "DTSTART": + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError, \ + "unsupported %s parm: %s "%(name, parms[0]) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError, \ + "unsupported TZOFFSETTO parm: "+parms[0] + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError, \ + 
"unsupported TZNAME parm: "+parms[0] + tzname = value + elif name == "COMMENT": + pass + elif name.upper().startswith('X-'): + # Ignore experimental properties. + pass + else: + raise ValueError, "unsupported property: "+name + else: + if name == "TZID": + for p in parms: + if not p.upper().startswith('X-'): + raise ValueError, \ + "unsupported TZID parm: "+p + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + elif name.upper().startswith('X-'): + # Ignore experimental properties. + pass + else: + raise ValueError, "unsupported property: "+name + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, `self._s`) + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + +def gettz(name=None): + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[:-1] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ','_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin: + try: + tz = tzwin(name) + except OSError: + pass + if not tz: + from dateutil.zoneinfo import gettz + tz = 
gettz(name) + if not tz: + for c in name: + # name must have at least one offset to be a tzstr + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + +# vim:ts=4:sw=4:et diff --git a/lib/dateutil/tzwin.py b/lib/dateutil/tzwin.py new file mode 100644 index 00000000..073e0ff6 --- /dev/null +++ b/lib/dateutil/tzwin.py @@ -0,0 +1,180 @@ +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct +import _winreg + +__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>" + +__all__ = ["tzwin", "tzwinlocal"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + +def _settzkeyname(): + global TZKEYNAME + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + try: + _winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + +_settzkeyname() + +class tzwinbase(datetime.tzinfo): + """tzinfo class based on win32's timezones available in the registry.""" + + def utcoffset(self, dt): + if self._isdst(dt): + return datetime.timedelta(minutes=self._dstoffset) + else: + return datetime.timedelta(minutes=self._stdoffset) + + def dst(self, dt): + if self._isdst(dt): + minutes = self._dstoffset - self._stdoffset + return datetime.timedelta(minutes=minutes) + else: + return datetime.timedelta(0) + + def tzname(self, dt): + if self._isdst(dt): + return self._dstname + else: + return self._stdname + + def list(): + """Return a list of all time zones known to the system.""" + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + tzkey = _winreg.OpenKey(handle, TZKEYNAME) + result = 
[_winreg.EnumKey(tzkey, i) + for i in range(_winreg.QueryInfoKey(tzkey)[0])] + tzkey.Close() + handle.Close() + return result + list = staticmethod(list) + + def display(self): + return self._display + + def _isdst(self, dt): + dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + if dston < dstoff: + return dston <= dt.replace(tzinfo=None) < dstoff + else: + return not dstoff <= dt.replace(tzinfo=None) < dston + + +class tzwin(tzwinbase): + + def __init__(self, name): + self._name = name + + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name)) + keydict = valuestodict(tzkey) + tzkey.Close() + handle.Close() + + self._stdname = keydict["Std"].encode("iso-8859-1") + self._dstname = keydict["Dlt"].encode("iso-8859-1") + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1 + + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + + def __init__(self): + + handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + + tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME) + keydict = valuestodict(tzlocalkey) + tzlocalkey.Close() + + self._stdname = keydict["StandardName"].encode("iso-8859-1") + 
self._dstname = keydict["DaylightName"].encode("iso-8859-1") + + try: + tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname)) + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + tzkey.Close() + except OSError: + self._display = None + + handle.Close() + + self._stdoffset = -keydict["Bias"]-keydict["StandardBias"] + self._dstoffset = self._stdoffset-keydict["DaylightBias"] + + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:6] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:6] + + def __reduce__(self): + return (self.__class__, ()) + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """dayofweek == 0 means Sunday, whichweek 5 means last instance""" + first = datetime.datetime(year, month, 1, hour, minute) + weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1)) + for n in xrange(whichweek): + dt = weekdayone+(whichweek-n)*ONEWEEK + if dt.month == month: + return dt + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dict = {} + size = _winreg.QueryInfoKey(key)[1] + for i in range(size): + data = _winreg.EnumValue(key, i) + dict[data[0]] = data[1] + return dict diff --git a/lib/dateutil/zoneinfo/__init__.py b/lib/dateutil/zoneinfo/__init__.py new file mode 100644 index 00000000..cc326064 --- /dev/null +++ b/lib/dateutil/zoneinfo/__init__.py @@ -0,0 +1,85 @@ +""" +Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net> + +This module offers extensions to the standard python 2.3+ +datetime module. 
+""" +from dateutil.tz import tzfile +from tarfile import TarFile +import os + +__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>" +__license__ = "PSF License" + +__all__ = ["setcachesize", "gettz", "rebuild"] + +CACHE = {} + +class tzfile(tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + +def getzoneinfofile(): + filenames = os.listdir(os.path.join(os.path.dirname(__file__))) + filenames.sort() + filenames.reverse() + for entry in filenames: + if entry.startswith("zoneinfo") and ".tar." in entry: + return os.path.join(os.path.dirname(__file__), entry) + return None + +def buildcache(): + global CACHE + zoneinfofile = getzoneinfofile() + if zoneinfofile: + tf = TarFile.open(zoneinfofile) + try: + for tarinfo in tf.getmembers(): + if tarinfo.islnk() or tarinfo.isfile(): + zonefile = tf.extractfile(tarinfo) + CACHE[tarinfo.name] = tzfile(zonefile) + finally: + tf.close() + +buildcache() + +del getzoneinfofile +del buildcache + +def setcachesize(_): + # Since the cache now eagerly initialized at + # import time, there's no point in controlling + # its size. + pass + +def gettz(name): + return CACHE.get(name) + +def rebuild(filename, tag=None, format="gz"): + import tempfile, shutil + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + if tag: tag = "-"+tag + targetname = "zoneinfo%s.tar.%s" % (tag, format) + try: + tf = TarFile.open(filename) + for name in tf.getnames(): + if not (name.endswith(".sh") or + name.endswith(".tab") or + name == "leapseconds"): + tf.extract(name, tmpdir) + filepath = os.path.join(tmpdir, name) + os.system("zic -d %s %s" % (zonedir, filepath)) + tf.close() + target = os.path.join(moduledir, targetname) + for entry in os.listdir(moduledir): + if entry.startswith("zoneinfo") and ".tar." 
in entry: + os.unlink(os.path.join(moduledir, entry)) + tf = TarFile.open(target, "w:%s" % format) + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + tf.close() + finally: + shutil.rmtree(tmpdir) diff --git a/lib/dateutil/zoneinfo/zoneinfo-2012c.tar.gz b/lib/dateutil/zoneinfo/zoneinfo-2012c.tar.gz new file mode 100644 index 00000000..641f903a Binary files /dev/null and b/lib/dateutil/zoneinfo/zoneinfo-2012c.tar.gz differ diff --git a/lib/dns/LICENSE b/lib/dns/LICENSE deleted file mode 100644 index 390a726d..00000000 --- a/lib/dns/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -ISC License - -Copyright (C) Dnspython Contributors - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all -copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE -AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL -DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR -PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - - - -Copyright (C) 2001-2017 Nominum, Inc. -Copyright (C) Google Inc. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose with or without fee is hereby granted, -provided that the above copyright notice and this permission notice -appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/lib/dns/README.md b/lib/dns/README.md deleted file mode 100644 index 9c1e2d80..00000000 --- a/lib/dns/README.md +++ /dev/null @@ -1,665 +0,0 @@ -# dnspython - -[![Build Status](https://travis-ci.org/rthalley/dnspython.svg?branch=master)](https://travis-ci.org/rthalley/dnspython) - -## INTRODUCTION - -dnspython is a DNS toolkit for Python. It supports almost all record types. It -can be used for queries, zone transfers, and dynamic updates. It supports TSIG -authenticated messages and EDNS0. - -dnspython provides both high and low level access to DNS. The high level classes -perform queries for data of a given name, type, and class, and return an answer -set. The low level classes allow direct manipulation of DNS zones, messages, -names, and records. - -To see a few of the ways dnspython can be used, look in the `examples/` directory. - -dnspython is a utility to work with DNS, `/etc/hosts` is thus not used. For -simple forward DNS lookups, it's better to use `socket.gethostbyname()`. - -dnspython originated at Nominum where it was developed -to facilitate the testing of DNS software. - -## INSTALLATION - -* Many distributions have dnspython packaged for you, so you should - check there first. -* If you have pip installed, you can do `pip install dnspython` -* If not just download the source file and unzip it, then run - `sudo python setup.py install` - -## ABOUT THIS RELEASE - -This is dnspython 1.16.0 - -### Notices - -Python 2.x support ends with the release of 1.16.0, unless there are -critical bugs in 1.16.0. Future versions of dnspython will only -support Python 3. 
- -Version numbering of future dnspython releases will also start at 2.0, as -incompatible changes will be permitted. We're not planning huge changes at -this time, but we'd like to do a better job at IDNA, and there are other -API improvements to be made. - -The ChangeLog has been discontinued. Please see the git history for detailed -change information. - -### New since 1.15.0: - -* Much of the internals of dns.query.udp() and dns.query.tcp() have - been factored out into dns.query.send_udp(), - dns.query.receive_udp(), dns.query.send_tcp(), and - dns.query.receive_tcp(). Applications which want more control over - the socket may find the new routines helpful; for example it would - be easy to send multiple queries over a single TCP connection. - -* The OPENPGPKEY RR, and the CHAOS class A RR are now supported. - -* EDNS0 client-subnet is supported. - -* dns.resover.query() now has a lifetime timeout optional parameter. - -* pycryptodome and pycryptodomex are now supported and recommended for use - instead of pycrypto. - -* dns.message.from_wire() now has an ignore_trailing option. - -* type signatures have been provided. - -* module dns.hash is now deprecated, use standard Python libraries instead. - -* setup.py supports Cythonization to improve performance. - -### Bugs fixed since 1.15.0: - -* DNSSEC signature validation didn't check names correctly. [Issue #295] - -* The NXDOMAIN exception should not use its docstring. [Issue #253] - -* Fixed regression where trailing zeros in APL RRs were not - suppressed, and then fixed the problem where trailing zeros - were not added back properly on python 3 when needed. - -* Masterfile TTL defaulting is now harmonized with BIND practice. - -* dns.query.xfr() now raises on a non-zero rcode. - -* Rdata module importing is now locked to avoid races. - -* Several Python 3 incompatibilities have been fixed. - -* NSEC3 bitmap parsing now works with mulitple NSEC3 windows. 
- -* dns.renderer.Render supports TSIG on DNS envelope sequences. - -* DNSSEC validation now checks names properly [Issue #295] - -### New since 1.14.0: - -* IDNA 2008 support is now available if the "idna" module has been - installed and IDNA 2008 is requested. The default IDNA behavior is - still IDNA 2003. The new IDNA codec mechanism is currently only - useful for direct calls to dns.name.from_text() or - dns.name.from_unicode(), but in future releases it will be deployed - throughout dnspython, e.g. so that you can read a masterfile with an - IDNA 2008 codec in force. - -* By default, dns.name.to_unicode() is not strict about which - version of IDNA the input complies with. Strictness can be - requested by using one of the strict IDNA codecs. - -* The AVC RR is now supported. - -### Bugs fixed since 1.14.0: - -* Some problems with newlines in various output modes have been - addressed. - -* dns.name.to_text() now returns text and not bytes on Python 3.x - -* Miscellaneous fixes for the Python 2/3 codeline merge. - -* Many "lint" fixes after the addition of pylint support. - -* The random number generator reseeds after a fork(). - - -## REQUIREMENTS - -Python 2.7 or 3.4+. - - -## HOME PAGE - -For the latest in releases, documentation, and information, visit the dnspython -home page at http://www.dnspython.org/ - - -## BUG REPORTS - -Bug reports may be opened at -https://github.com/rthalley/dnspython/issues or sent to -bugs@dnspython.org - - -## MAILING LISTS - -A number of mailing lists are available. Visit the dnspython home page to -subscribe or unsubscribe. - - -## PRIOR RELEASE INFORMATION - -### New since 1.13.0: - -* CSYNC RRs are now supported. - -* `dns/message.py` (`make_query`): Setting any value which implies EDNS will - turn on EDNS if `use_edns` has not been specified. - -### Bugs fixed since 1.13.0: - -* TSIG signature algorithm setting was broken by the Python 2 and Python 3 code - line merge. 
- -* A bug in the LOC RR destroyed N/S and E/W distinctions within a degree of the - equator or prime merdian respectively. - -* Misc. fixes to deal with fallout from the Python 2 & 3 merge. - Fixes #156, #157, #158, #159, #160. - -* Running with python optimization on caused issues when stripped docstrings - were referenced. Fixes #154 - -* `dns.zone.from_text()` erroneously required the zone to be provided. - Fixes #153 - -### New since 1.12.0: - -* Dnspython now uses a single source for Python 2 and Python 3, eliminating the - painful merging between the Python 2 and Python 3 branches. Thank you so much - to Arthur Gautier for taking on this challenge and making it work! It was a - big job! - -* Support for Python older than 2.6 dropped. - -* Support for Python older than 3.3 dropped. - -* Zone origin can be specified as a string. - -* A rich string representation for all DNSExceptions. - -* setuptools has replaced distutils - -* Added support for CAA, CDS, CDNSKEY, EUI48, EUI64, and URI RR types. - -* Names now support the pickle protocol. - -* Ports can be specified per-nameserver in the stub resolver. - -### Bugs fixed since 1.12.0: - -* A number of Unicode name bugs have been fixed. - -* `resolv.conf` processing now rejects lines with too few tokens. - -* NameDicts now keep the max-depth value correct, and update properly. - -### New since 1.11.1: - -* Added `dns.zone.to_text()`. - -* Added support for "options rotate" in `/etc/resolv.conf`. - -* `dns.rdtypes.ANY.DNSKEY` now has helpers functions to convert between the - numeric form of the flags and a set of human-friendly strings - -* The reverse name of an IPv6 mapped IPv4 address is now in the IPv4 reverse - namespace. - -* The test system can now run the tests without requiring dnspython to be - installed. 
- -* Preliminary Elliptic Curve DNSSEC Validation (requires ecdsa module) - -### Bugs fixed since 1.11.1: - -* dnspython raised an exception when reading a masterfile starting with leading - whitespace - -* dnspython was affected by a python slicing API bug present on 64-bit windows. - -* Unicode escaping was applied at the wrong time. - -* RRSIG `to_text()` did not respect the relativize setting. - -* APL RRs with zero rdlength were rejected. - -* The tokenizer could put back an unescaped token. - -* Making a response to a message signed with TSIG was broken. - -* The IXFR state machine didn't handle long IXFR diffs. - -### New since 1.11.0: - -* Nothing - -### Bugs fixed since 1.11.0: - -* `dns.resolver.Resolver` erroneously referred to `retry_servfail` - instead of `self.retry_servfail`. - -* `dns.tsigkeyring.to_text()` would fail trying to convert the keyname to text. - -* Multi-message TSIGs were broken for algorithms other than HMAC-MD5 because we - weren't passing the right digest module to the HMAC code. - -* `dns.dnssec._find_candidate_keys()` tried to extract the key from the wrong - variable name. - -* $GENERATE tests were not backward compatible with python 2.4. - -### New since 1.10.0: - -* $GENERATE support - -* TLSA RR support - -* Added set_flags() method to dns.resolver.Resolver - -### Bugs fixed since 1.10.0: - -* Names with offsets >= 2^14 are no longer added to the compression table. - -* The "::" syntax is not used to shorten a single 16-bit section of the text - form an IPv6 address. - -* Caches are now locked. - -* YXDOMAIN is raised if seen by the resolver. - -* Empty rdatasets are not printed. - -* DNSKEY key tags are no longer assumed to be unique. - -### New since 1.9.4: - -* Added dns.resolver.LRUCache. In this cache implementation, the cache size is - limited to a user-specified number of nodes, and when adding a new node to a - full cache the least-recently used node is removed. 
If you're crawling the web - or otherwise doing lots of resolutions and you are using a cache, switching - to the LRUCache is recommended. - -* `dns.resolver.query()` will try TCP if a UDP response is truncated. - -* The python socket module's DNS methods can be now be overridden with - implementations that use dnspython's resolver. - -* Old DNSSEC types KEY, NXT, and SIG have been removed. - -* Whitespace is allowed in SSHFP fingerprints. - -* Origin checking in `dns.zone.from_xfr()` can be disabled. - -* Trailing junk checking can be disabled. - -* A source port can be specified when creating a resolver query. - -* All EDNS values may now be specified to `dns.message.make_query()`. - -### Bugs fixed since 1.9.4: - -* IPv4 and IPv6 address processing is now stricter. - -* Bounds checking of slices in rdata wire processing is now more strict, and - bounds errors (e.g. we got less data than was expected) now raise - `dns.exception.FormError` rather than `IndexError`. - -* Specifying a source port without specifying source used to have no effect, but - now uses the wildcard address and the specified port. - -### New since 1.9.3: - -* Nothing. - -### Bugs fixed since 1.9.3: - -* The rdata `_wire_cmp()` routine now handles relative names. - -* The SIG RR implementation was missing `import struct`. - -### New since 1.9.2: - -* A boolean parameter, `raise_on_no_answer`, has been added to the `query()` - methods. In no-error, no-data situations, this parameter determines whether - `NoAnswer` should be raised or not. If True, `NoAnswer` is raised. If False, - then an `Answer()` object with a None rrset will be returned. - -* Resolver `Answer()` objects now have a canonical_name field. - -* Rdata now has a `__hash__` method. - -### Bugs fixed since 1.9.2: - -* Dnspython was erroneously doing case-insensitive comparisons of the names in - NSEC and RRSIG RRs. - -* We now use `is` and not `==` when testing what section an RR is in. - -* The resolver now disallows metaqueries. 
- -### New since 1.9.1: - -* Nothing. - -### Bugs fixed since 1.9.1: - -* The `dns.dnssec` module didn't work at all due to missing imports that escaped - detection in testing because the test suite also did the imports. The third - time is the charm! - -### New since 1.9.0: - -* Nothing. - -### Bugs fixed since 1.9.0: - -* The `dns.dnssec` module didn't work with DSA due to namespace contamination - from a "from"-style import. - -### New since 1.8.0: - -* dnspython now uses `poll()` instead of `select()` when available. - -* Basic DNSSEC validation can be done using `dns.dnsec.validate()` and - `dns.dnssec.validate_rrsig()` if you have PyCrypto 2.3 or later installed. - Complete secure resolution is not yet available. - -* Added `key_id()` to the DNSSEC module, which computes the DNSSEC key id of a - DNSKEY rdata. - -* Added `make_ds()` to the DNSSEC module, which returns the DS RR for a given - DNSKEY rdata. - -* dnspython now raises an exception if HMAC-SHA284 or HMAC-SHA512 are used with - a Python older than 2.5.2. (Older Pythons do not compute the correct value.) - -* Symbolic constants are now available for TSIG algorithm names. - -### Bugs fixed since 1.8.0 - -* `dns.resolver.zone_for_name()` didn't handle a query response with a CNAME or - DNAME correctly in some cases. - -* When specifying rdata types and classes as text, Unicode strings may now be - used. - -* Hashlib compatibility issues have been fixed. - -* `dns.message` now imports `dns.edns`. - -* The TSIG algorithm value was passed incorrectly to `use_tsig()` in some cases. - -### New since 1.7.1: - -* Support for hmac-sha1, hmac-sha224, hmac-sha256, hmac-sha384 and hmac-sha512 - has been contributed by Kevin Chen. - -* The tokenizer's tokens are now Token objects instead of (type, value) tuples. - -### Bugs fixed since 1.7.1: - -* Escapes in masterfiles now work correctly. Previously they were only working - correctly when the text involved was part of a domain name. 
- -* When constructing a DDNS update, if the `present()` method was used with a - single rdata, a zero TTL was not added. - -* The entropy pool needed locking to be thread safe. - -* The entropy pool's reading of `/dev/random` could cause dnspython to block. - -* The entropy pool did buffered reads, potentially consuming more randomness - than we needed. - -* The entropy pool did not seed with high quality randomness on Windows. - -* SRV records were compared incorrectly. - -* In the e164 query function, the resolver parameter was not used. - -### New since 1.7.0: - -* Nothing - -### Bugs fixed since 1.7.0: - -* The 1.7.0 kitting process inadvertently omitted the code for the DLV RR. - -* Negative DDNS prerequisites are now handled correctly. - -### New since 1.6.0: - -* Rdatas now have a `to_digestable()` method, which returns the DNSSEC canonical - form of the rdata, suitable for use in signature computations. - -* The NSEC3, NSEC3PARAM, DLV, and HIP RR types are now supported. - -* An entropy module has been added and is used to randomize query ids. - -* EDNS0 options are now supported. - -* UDP IXFR is now supported. - -* The wire format parser now has a `one_rr_per_rrset` mode, which suppresses the - usual coalescing of all RRs of a given type into a single RRset. - -* Various helpful DNSSEC-related constants are now defined. - -* The resolver's `query()` method now has an optional `source` parameter, - allowing the source IP address to be specified. - -### Bugs fixed since 1.6.0: - -* On Windows, the resolver set the domain incorrectly. - -* DS RR parsing only allowed one Base64 chunk. - -* TSIG validation didn't always use absolute names. - -* `NSEC.to_text()` only printed the last window. - -* We did not canonicalize IPv6 addresses before comparing them; we - would thus treat equivalent but different textual forms, e.g. - "1:00::1" and "1::1" as being non-equivalent. - -* If the peer set a TSIG error, we didn't raise an exception. 
- -* Some EDNS bugs in the message code have been fixed (see the ChangeLog - for details). - -### New since 1.5.0: - -* Added dns.inet.is_multicast(). - -### Bugs fixed since 1.5.0: - -* If `select()` raises an exception due to EINTR, we should just `select()` - again. - -* If the queried address is a multicast address, then don't check that the - address of the response is the same as the address queried. - -* NAPTR comparisons didn't compare the preference field due to a typo. - -* Testing of whether a Windows NIC is enabled now works on Vista thanks to code - contributed by Paul Marks. - -### New since 1.4.0: - -* Answer objects now support more of the python sequence protocol, forwarding - the requests to the answer rrset. E.g. `for a in answer` is equivalent to - `for a in answer.rrset`, `answer[i]` is equivalent to `answer.rrset[i]`, and - `answer[i:j]` is equivalent to `answer.rrset[i:j]`. - -* Making requests using EDNS, including indicating DNSSEC awareness, - is now easier. For example, you can now say: - `q = dns.message.make_query('www.dnspython.org', 'MX', want_dnssec=True)` - -* `dns.query.xfr()` can now be used for IXFR. - -* Support has been added for the DHCID, IPSECKEY, and SPF RR types. - -* UDP messages from unexpected sources can now be ignored by setting - `ignore_unexpected` to True when calling `dns.query.udp`. - -### Bugs fixed since 1.4.0: - -* If `/etc/resolv.conf` didn't exist, we raised an exception instead of simply - using the default resolver configuration. - -* In `dns.resolver.Resolver._config_win32_fromkey()`, we were passing the wrong - variable to `self._config_win32_search()`. 
- -### New since 1.3.5: - -* You can now convert E.164 numbers to/from their ENUM name forms: - ```python - >>> import dns.e164 - >>> n = dns.e164.from_e164("+1 555 1212") - >>> n - <DNS name 2.1.2.1.5.5.5.1.e164.arpa.> - >>> dns.e164.to_e164(n) - '+15551212' - ``` - -* You can now convert IPv4 and IPv6 address to/from their corresponding DNS - reverse map names: - ```python - >>> import dns.reversename - >>> n = dns.reversename.from_address("127.0.0.1") - >>> n - <DNS name 1.0.0.127.in-addr.arpa.> - >>> dns.reversename.to_address(n) - '127.0.0.1' - ``` - -* You can now convert between Unicode strings and their IDN ACE form: - ```python - >>> n = dns.name.from_text(u'les-\u00e9l\u00e8ves.example.') - >>> n - <DNS name xn--les-lves-50ai.example.> - >>> n.to_unicode() - u'les-\xe9l\xe8ves.example.' - ``` - -* The origin parameter to `dns.zone.from_text()` and `dns.zone.to_text()` is now - optional. If not specified, the origin will be taken from the first $ORIGIN - statement in the master file. - -* Sanity checking of a zone can be disabled; this is useful when working with - files which are zone fragments. - -### Bugs fixed since 1.3.5: - -* The correct delimiter was not used when retrieving the list of nameservers - from the registry in certain versions of windows. - -* The floating-point version of latitude and longitude in LOC RRs - (`float_latitude` and `float_longitude`) had incorrect signs for south - latitudes and west longitudes. - -* BIND 8 TTL syntax is now accepted in all TTL-like places (i.e. SOA fields - refresh, retry, expire, and minimum; SIG/RRSIG field original_ttl). - -* TTLs are now bounds checked when their text form is parsed, and their values - must be in the closed interval `[0, 2^31 - 1]`. - -### New since 1.3.4: - -* In the resolver, if time goes backward a little bit, ignore it. - -* `zone_for_name()` has been added to the resolver module. 
It returns the zone - which is authoritative for the specified name, which is handy for dynamic - update. E.g. - - import dns.resolver - print dns.resolver.zone_for_name('www.dnspython.org') - - will output `"dnspython.org."` and - `print dns.resolver.zone_for_name('a.b.c.d.e.f.example.')` - will output `"."`. - -* The default resolver can be fetched with the `get_default_resolver()` method. - -* You can now get the parent (immediate superdomain) of a name by using the - `parent()` method. - -* `Zone.iterate_rdatasets()` and `Zone.iterate_rdatas()` now have a default - rdtype of `dns.rdatatype.ANY` like the documentation says. - -* A Dynamic DNS example, ddns.py, has been added. - -### New since 1.3.3: - -* The source address and port may now be specified when calling - `dns.query.{udp,tcp,xfr}`. - -* The resolver now does exponential backoff each time it runs through all of the - nameservers. - -* Rcodes which indicate a nameserver is likely to be a "permanent failure" for a - query cause the nameserver to be removed from the mix for that query. - -### New since 1.3.2: - -* `dns.message.Message.find_rrset()` now uses an index, vastly improving the - `from_wire()` performance of large messages such as zone transfers. - -* Added `dns.message.make_response()`, which creates a skeletal response for the - specified query. - -* Added `opcode()` and `set_opcode()` convenience methods to the - `dns.message.Message` class. Added the `request_payload` attribute to the - Message class. - -* The `file` parameter of `dns.name.Name.to_wire()` is now optional; if omitted, - the wire form will be returned as the value of the function. - -* `dns.zone.from_xfr()` in relativization mode incorrectly set `zone.origin` to - the empty name. - -* The masterfile parser incorrectly rejected TXT records where a value was not - quoted. - -### New since 1.3.1: - -* The NSEC format doesn't allow specifying types by number, so we shouldn't - either. 
(Using the unknown type format is still OK though.) - -* The resolver wasn't catching `dns.exception.Timeout`, so a timeout erroneously - caused the whole resolution to fail instead of just going on to the next - server. - -* The renderer module didn't import random, causing an exception to be raised if - a query id wasn't provided when a Renderer was created. - -* The conversion of LOC milliseconds values from text to binary was incorrect if - the length of the milliseconds string was not 3. - -### New since 1.3.0: - -* Added support for the SSHFP type. - -### New since 1.2.0: - -* Added support for new DNSSEC types RRSIG, NSEC, and DNSKEY. - -* This release fixes all known bugs. - -* See the ChangeLog file for more detailed information on changes since the - prior release. diff --git a/lib/dns/__init__.py b/lib/dns/__init__.py deleted file mode 100644 index 04143f03..00000000 --- a/lib/dns/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -# PROJECT URL -# https://github.com/rthalley/dnspython - -"""dnspython DNS toolkit""" - -__all__ = [ - 'dnssec', - 'e164', - 'edns', - 'entropy', - 'exception', - 'flags', - 'hash', - 'inet', - 'ipv4', - 'ipv6', - 'message', - 'name', - 'namedict', - 'node', - 'opcode', - 'query', - 'rcode', - 'rdata', - 'rdataclass', - 'rdataset', - 'rdatatype', - 'renderer', - 'resolver', - 'reversename', - 'rrset', - 'set', - 'tokenizer', - 'tsig', - 'tsigkeyring', - 'ttl', - 'rdtypes', - 'update', - 'version', - 'wiredata', - 'zone', -] diff --git a/lib/dns/_compat.py b/lib/dns/_compat.py deleted file mode 100644 index ca0931c2..00000000 --- a/lib/dns/_compat.py +++ /dev/null @@ -1,59 +0,0 @@ -import sys -import decimal -from decimal import Context - -PY3 = sys.version_info[0] == 3 -PY2 = sys.version_info[0] == 2 - - -if PY3: - long = int - xrange = range -else: - long = long # pylint: disable=long-builtin - xrange = xrange # pylint: disable=xrange-builtin - -# unicode / binary types -if PY3: - text_type = str - binary_type = bytes - string_types = (str,) - unichr = chr - def maybe_decode(x): - return x.decode() - def maybe_encode(x): - return x.encode() - def maybe_chr(x): - return x - def maybe_ord(x): - return x -else: - text_type = unicode # pylint: disable=unicode-builtin, undefined-variable - binary_type = str - string_types = ( - basestring, # pylint: disable=basestring-builtin, undefined-variable - ) - unichr = unichr # pylint: disable=unichr-builtin - def maybe_decode(x): - return x - def maybe_encode(x): - return x - def maybe_chr(x): - return chr(x) - def maybe_ord(x): - return ord(x) - - -def round_py2_compat(what): - """ - Python 2 and Python 3 use different rounding strategies in round(). 
This - function ensures that results are python2/3 compatible and backward - compatible with previous py2 releases - :param what: float - :return: rounded long - """ - d = Context( - prec=len(str(long(what))), # round to integer with max precision - rounding=decimal.ROUND_HALF_UP - ).create_decimal(str(what)) # str(): python 2.6 compat - return long(d) diff --git a/lib/dns/dnssec.py b/lib/dns/dnssec.py deleted file mode 100644 index 35da6b5a..00000000 --- a/lib/dns/dnssec.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""Common DNSSEC-related functions and constants.""" - -from io import BytesIO -import struct -import time - -import dns.exception -import dns.name -import dns.node -import dns.rdataset -import dns.rdata -import dns.rdatatype -import dns.rdataclass -from ._compat import string_types - - -class UnsupportedAlgorithm(dns.exception.DNSException): - """The DNSSEC algorithm is not supported.""" - - -class ValidationFailure(dns.exception.DNSException): - """The DNSSEC signature is invalid.""" - - -#: RSAMD5 -RSAMD5 = 1 -#: DH -DH = 2 -#: DSA -DSA = 3 -#: ECC -ECC = 4 -#: RSASHA1 -RSASHA1 = 5 -#: DSANSEC3SHA1 -DSANSEC3SHA1 = 6 -#: RSASHA1NSEC3SHA1 -RSASHA1NSEC3SHA1 = 7 -#: RSASHA256 -RSASHA256 = 8 -#: RSASHA512 -RSASHA512 = 10 -#: ECDSAP256SHA256 -ECDSAP256SHA256 = 13 -#: ECDSAP384SHA384 -ECDSAP384SHA384 = 14 -#: INDIRECT -INDIRECT = 252 -#: PRIVATEDNS -PRIVATEDNS = 253 -#: PRIVATEOID -PRIVATEOID = 254 - -_algorithm_by_text = { - 'RSAMD5': RSAMD5, - 'DH': DH, - 'DSA': DSA, - 'ECC': ECC, - 'RSASHA1': RSASHA1, - 'DSANSEC3SHA1': DSANSEC3SHA1, - 'RSASHA1NSEC3SHA1': RSASHA1NSEC3SHA1, - 'RSASHA256': RSASHA256, - 'RSASHA512': RSASHA512, - 'INDIRECT': INDIRECT, - 'ECDSAP256SHA256': ECDSAP256SHA256, - 'ECDSAP384SHA384': ECDSAP384SHA384, - 'PRIVATEDNS': PRIVATEDNS, - 'PRIVATEOID': PRIVATEOID, -} - -# We construct the inverse mapping programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mapping not to be true inverse. - -_algorithm_by_value = {y: x for x, y in _algorithm_by_text.items()} - - -def algorithm_from_text(text): - """Convert text into a DNSSEC algorithm value. - - Returns an ``int``. - """ - - value = _algorithm_by_text.get(text.upper()) - if value is None: - value = int(text) - return value - - -def algorithm_to_text(value): - """Convert a DNSSEC algorithm value to text - - Returns a ``str``. 
- """ - - text = _algorithm_by_value.get(value) - if text is None: - text = str(value) - return text - - -def _to_rdata(record, origin): - s = BytesIO() - record.to_wire(s, origin=origin) - return s.getvalue() - - -def key_id(key, origin=None): - """Return the key id (a 16-bit number) for the specified key. - - Note the *origin* parameter of this function is historical and - is not needed. - - Returns an ``int`` between 0 and 65535. - """ - - rdata = _to_rdata(key, origin) - rdata = bytearray(rdata) - if key.algorithm == RSAMD5: - return (rdata[-3] << 8) + rdata[-2] - else: - total = 0 - for i in range(len(rdata) // 2): - total += (rdata[2 * i] << 8) + \ - rdata[2 * i + 1] - if len(rdata) % 2 != 0: - total += rdata[len(rdata) - 1] << 8 - total += ((total >> 16) & 0xffff) - return total & 0xffff - - -def make_ds(name, key, algorithm, origin=None): - """Create a DS record for a DNSSEC key. - - *name* is the owner name of the DS record. - - *key* is a ``dns.rdtypes.ANY.DNSKEY``. - - *algorithm* is a string describing which hash algorithm to use. The - currently supported hashes are "SHA1" and "SHA256". Case does not - matter for these strings. - - *origin* is a ``dns.name.Name`` and will be used as the origin - if *key* is a relative name. - - Returns a ``dns.rdtypes.ANY.DS``. 
- """ - - if algorithm.upper() == 'SHA1': - dsalg = 1 - hash = SHA1.new() - elif algorithm.upper() == 'SHA256': - dsalg = 2 - hash = SHA256.new() - else: - raise UnsupportedAlgorithm('unsupported algorithm "%s"' % algorithm) - - if isinstance(name, string_types): - name = dns.name.from_text(name, origin) - hash.update(name.canonicalize().to_wire()) - hash.update(_to_rdata(key, origin)) - digest = hash.digest() - - dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, dsalg) + digest - return dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0, - len(dsrdata)) - - -def _find_candidate_keys(keys, rrsig): - candidate_keys = [] - value = keys.get(rrsig.signer) - if value is None: - return None - if isinstance(value, dns.node.Node): - try: - rdataset = value.find_rdataset(dns.rdataclass.IN, - dns.rdatatype.DNSKEY) - except KeyError: - return None - else: - rdataset = value - for rdata in rdataset: - if rdata.algorithm == rrsig.algorithm and \ - key_id(rdata) == rrsig.key_tag: - candidate_keys.append(rdata) - return candidate_keys - - -def _is_rsa(algorithm): - return algorithm in (RSAMD5, RSASHA1, - RSASHA1NSEC3SHA1, RSASHA256, - RSASHA512) - - -def _is_dsa(algorithm): - return algorithm in (DSA, DSANSEC3SHA1) - - -def _is_ecdsa(algorithm): - return _have_ecdsa and (algorithm in (ECDSAP256SHA256, ECDSAP384SHA384)) - - -def _is_md5(algorithm): - return algorithm == RSAMD5 - - -def _is_sha1(algorithm): - return algorithm in (DSA, RSASHA1, - DSANSEC3SHA1, RSASHA1NSEC3SHA1) - - -def _is_sha256(algorithm): - return algorithm in (RSASHA256, ECDSAP256SHA256) - - -def _is_sha384(algorithm): - return algorithm == ECDSAP384SHA384 - - -def _is_sha512(algorithm): - return algorithm == RSASHA512 - - -def _make_hash(algorithm): - if _is_md5(algorithm): - return MD5.new() - if _is_sha1(algorithm): - return SHA1.new() - if _is_sha256(algorithm): - return SHA256.new() - if _is_sha384(algorithm): - return SHA384.new() - if _is_sha512(algorithm): - return SHA512.new() 
- raise ValidationFailure('unknown hash for algorithm %u' % algorithm) - - -def _make_algorithm_id(algorithm): - if _is_md5(algorithm): - oid = [0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05] - elif _is_sha1(algorithm): - oid = [0x2b, 0x0e, 0x03, 0x02, 0x1a] - elif _is_sha256(algorithm): - oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01] - elif _is_sha512(algorithm): - oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03] - else: - raise ValidationFailure('unknown algorithm %u' % algorithm) - olen = len(oid) - dlen = _make_hash(algorithm).digest_size - idbytes = [0x30] + [8 + olen + dlen] + \ - [0x30, olen + 4] + [0x06, olen] + oid + \ - [0x05, 0x00] + [0x04, dlen] - return struct.pack('!%dB' % len(idbytes), *idbytes) - - -def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None): - """Validate an RRset against a single signature rdata - - The owner name of *rrsig* is assumed to be the same as the owner name - of *rrset*. - - *rrset* is the RRset to validate. It can be a ``dns.rrset.RRset`` or - a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple. - - *rrsig* is a ``dns.rdata.Rdata``, the signature to validate. - - *keys* is the key dictionary, used to find the DNSKEY associated with - a given name. The dictionary is keyed by a ``dns.name.Name``, and has - ``dns.node.Node`` or ``dns.rdataset.Rdataset`` values. - - *origin* is a ``dns.name.Name``, the origin to use for relative names. - - *now* is an ``int``, the time to use when validating the signatures, - in seconds since the UNIX epoch. The default is the current time. 
- """ - - if isinstance(origin, string_types): - origin = dns.name.from_text(origin, dns.name.root) - - candidate_keys = _find_candidate_keys(keys, rrsig) - if candidate_keys is None: - raise ValidationFailure('unknown key') - - for candidate_key in candidate_keys: - # For convenience, allow the rrset to be specified as a (name, - # rdataset) tuple as well as a proper rrset - if isinstance(rrset, tuple): - rrname = rrset[0] - rdataset = rrset[1] - else: - rrname = rrset.name - rdataset = rrset - - if now is None: - now = time.time() - if rrsig.expiration < now: - raise ValidationFailure('expired') - if rrsig.inception > now: - raise ValidationFailure('not yet valid') - - hash = _make_hash(rrsig.algorithm) - - if _is_rsa(rrsig.algorithm): - keyptr = candidate_key.key - (bytes_,) = struct.unpack('!B', keyptr[0:1]) - keyptr = keyptr[1:] - if bytes_ == 0: - (bytes_,) = struct.unpack('!H', keyptr[0:2]) - keyptr = keyptr[2:] - rsa_e = keyptr[0:bytes_] - rsa_n = keyptr[bytes_:] - try: - pubkey = CryptoRSA.construct( - (number.bytes_to_long(rsa_n), - number.bytes_to_long(rsa_e))) - except ValueError: - raise ValidationFailure('invalid public key') - sig = rrsig.signature - elif _is_dsa(rrsig.algorithm): - keyptr = candidate_key.key - (t,) = struct.unpack('!B', keyptr[0:1]) - keyptr = keyptr[1:] - octets = 64 + t * 8 - dsa_q = keyptr[0:20] - keyptr = keyptr[20:] - dsa_p = keyptr[0:octets] - keyptr = keyptr[octets:] - dsa_g = keyptr[0:octets] - keyptr = keyptr[octets:] - dsa_y = keyptr[0:octets] - pubkey = CryptoDSA.construct( - (number.bytes_to_long(dsa_y), - number.bytes_to_long(dsa_g), - number.bytes_to_long(dsa_p), - number.bytes_to_long(dsa_q))) - sig = rrsig.signature[1:] - elif _is_ecdsa(rrsig.algorithm): - # use ecdsa for NIST-384p -- not currently supported by pycryptodome - - keyptr = candidate_key.key - - if rrsig.algorithm == ECDSAP256SHA256: - curve = ecdsa.curves.NIST256p - key_len = 32 - elif rrsig.algorithm == ECDSAP384SHA384: - curve = ecdsa.curves.NIST384p 
- key_len = 48 - - x = number.bytes_to_long(keyptr[0:key_len]) - y = number.bytes_to_long(keyptr[key_len:key_len * 2]) - if not ecdsa.ecdsa.point_is_valid(curve.generator, x, y): - raise ValidationFailure('invalid ECDSA key') - point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order) - verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, - curve) - pubkey = ECKeyWrapper(verifying_key, key_len) - r = rrsig.signature[:key_len] - s = rrsig.signature[key_len:] - sig = ecdsa.ecdsa.Signature(number.bytes_to_long(r), - number.bytes_to_long(s)) - - else: - raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm) - - hash.update(_to_rdata(rrsig, origin)[:18]) - hash.update(rrsig.signer.to_digestable(origin)) - - if rrsig.labels < len(rrname) - 1: - suffix = rrname.split(rrsig.labels + 1)[1] - rrname = dns.name.from_text('*', suffix) - rrnamebuf = rrname.to_digestable(origin) - rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass, - rrsig.original_ttl) - rrlist = sorted(rdataset) - for rr in rrlist: - hash.update(rrnamebuf) - hash.update(rrfixed) - rrdata = rr.to_digestable(origin) - rrlen = struct.pack('!H', len(rrdata)) - hash.update(rrlen) - hash.update(rrdata) - - try: - if _is_rsa(rrsig.algorithm): - verifier = pkcs1_15.new(pubkey) - # will raise ValueError if verify fails: - verifier.verify(hash, sig) - elif _is_dsa(rrsig.algorithm): - verifier = DSS.new(pubkey, 'fips-186-3') - verifier.verify(hash, sig) - elif _is_ecdsa(rrsig.algorithm): - digest = hash.digest() - if not pubkey.verify(digest, sig): - raise ValueError - else: - # Raise here for code clarity; this won't actually ever happen - # since if the algorithm is really unknown we'd already have - # raised an exception above - raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm) - # If we got here, we successfully verified so we can return without error - return - except ValueError: - # this happens on an individual validation failure - continue - # nothing 
verified -- raise failure: - raise ValidationFailure('verify failure') - - -def _validate(rrset, rrsigset, keys, origin=None, now=None): - """Validate an RRset. - - *rrset* is the RRset to validate. It can be a ``dns.rrset.RRset`` or - a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple. - - *rrsigset* is the signature RRset to be validated. It can be a - ``dns.rrset.RRset`` or a ``(dns.name.Name, dns.rdataset.Rdataset)`` tuple. - - *keys* is the key dictionary, used to find the DNSKEY associated with - a given name. The dictionary is keyed by a ``dns.name.Name``, and has - ``dns.node.Node`` or ``dns.rdataset.Rdataset`` values. - - *origin* is a ``dns.name.Name``, the origin to use for relative names. - - *now* is an ``int``, the time to use when validating the signatures, - in seconds since the UNIX epoch. The default is the current time. - """ - - if isinstance(origin, string_types): - origin = dns.name.from_text(origin, dns.name.root) - - if isinstance(rrset, tuple): - rrname = rrset[0] - else: - rrname = rrset.name - - if isinstance(rrsigset, tuple): - rrsigname = rrsigset[0] - rrsigrdataset = rrsigset[1] - else: - rrsigname = rrsigset.name - rrsigrdataset = rrsigset - - rrname = rrname.choose_relativity(origin) - rrsigname = rrsigname.choose_relativity(origin) - if rrname != rrsigname: - raise ValidationFailure("owner names do not match") - - for rrsig in rrsigrdataset: - try: - _validate_rrsig(rrset, rrsig, keys, origin, now) - return - except ValidationFailure: - pass - raise ValidationFailure("no RRSIGs validated") - - -def _need_pycrypto(*args, **kwargs): - raise NotImplementedError("DNSSEC validation requires pycryptodome/pycryptodomex") - - -try: - try: - # test we're using pycryptodome, not pycrypto (which misses SHA1 for example) - from Crypto.Hash import MD5, SHA1, SHA256, SHA384, SHA512 - from Crypto.PublicKey import RSA as CryptoRSA, DSA as CryptoDSA - from Crypto.Signature import pkcs1_15, DSS - from Crypto.Util import number - except ImportError: - 
from Cryptodome.Hash import MD5, SHA1, SHA256, SHA384, SHA512 - from Cryptodome.PublicKey import RSA as CryptoRSA, DSA as CryptoDSA - from Cryptodome.Signature import pkcs1_15, DSS - from Cryptodome.Util import number -except ImportError: - validate = _need_pycrypto - validate_rrsig = _need_pycrypto - _have_pycrypto = False - _have_ecdsa = False -else: - validate = _validate - validate_rrsig = _validate_rrsig - _have_pycrypto = True - - try: - import ecdsa - import ecdsa.ecdsa - import ecdsa.ellipticcurve - import ecdsa.keys - except ImportError: - _have_ecdsa = False - else: - _have_ecdsa = True - - class ECKeyWrapper(object): - - def __init__(self, key, key_len): - self.key = key - self.key_len = key_len - - def verify(self, digest, sig): - diglong = number.bytes_to_long(digest) - return self.key.pubkey.verifies(diglong, sig) diff --git a/lib/dns/dnssec.pyi b/lib/dns/dnssec.pyi deleted file mode 100644 index 5699b3e1..00000000 --- a/lib/dns/dnssec.pyi +++ /dev/null @@ -1,19 +0,0 @@ -from typing import Union, Dict, Tuple, Optional -from . import rdataset, rrset, exception, name, rdtypes, rdata, node -import dns.rdtypes.ANY.DS as DS -import dns.rdtypes.ANY.DNSKEY as DNSKEY - -_have_ecdsa : bool -_have_pycrypto : bool - -def validate_rrsig(rrset : Union[Tuple[name.Name, rdataset.Rdataset], rrset.RRset], rrsig : rdata.Rdata, keys : Dict[name.Name, Union[node.Node, rdataset.Rdataset]], origin : Optional[name.Name] = None, now : Optional[int] = None) -> None: - ... - -def validate(rrset: Union[Tuple[name.Name, rdataset.Rdataset], rrset.RRset], rrsigset : Union[Tuple[name.Name, rdataset.Rdataset], rrset.RRset], keys : Dict[name.Name, Union[node.Node, rdataset.Rdataset]], origin=None, now=None) -> None: - ... - -class ValidationFailure(exception.DNSException): - ... - -def make_ds(name : name.Name, key : DNSKEY.DNSKEY, algorithm : str, origin : Optional[name.Name] = None) -> DS.DS: - ... 
diff --git a/lib/dns/e164.py b/lib/dns/e164.py deleted file mode 100644 index 758c47a7..00000000 --- a/lib/dns/e164.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS E.164 helpers.""" - -import dns.exception -import dns.name -import dns.resolver -from ._compat import string_types, maybe_decode - -#: The public E.164 domain. -public_enum_domain = dns.name.from_text('e164.arpa.') - - -def from_e164(text, origin=public_enum_domain): - """Convert an E.164 number in textual form into a Name object whose - value is the ENUM domain name for that number. - - Non-digits in the text are ignored, i.e. "16505551212", - "+1.650.555.1212" and "1 (650) 555-1212" are all the same. - - *text*, a ``text``, is an E.164 number in textual form. - - *origin*, a ``dns.name.Name``, the domain in which the number - should be constructed. The default is ``e164.arpa.``. - - Returns a ``dns.name.Name``. 
- """ - - parts = [d for d in text if d.isdigit()] - parts.reverse() - return dns.name.from_text('.'.join(parts), origin=origin) - - -def to_e164(name, origin=public_enum_domain, want_plus_prefix=True): - """Convert an ENUM domain name into an E.164 number. - - Note that dnspython does not have any information about preferred - number formats within national numbering plans, so all numbers are - emitted as a simple string of digits, prefixed by a '+' (unless - *want_plus_prefix* is ``False``). - - *name* is a ``dns.name.Name``, the ENUM domain name. - - *origin* is a ``dns.name.Name``, a domain containing the ENUM - domain name. The name is relativized to this domain before being - converted to text. If ``None``, no relativization is done. - - *want_plus_prefix* is a ``bool``. If True, add a '+' to the beginning of - the returned number. - - Returns a ``text``. - - """ - if origin is not None: - name = name.relativize(origin) - dlabels = [d for d in name.labels if d.isdigit() and len(d) == 1] - if len(dlabels) != len(name.labels): - raise dns.exception.SyntaxError('non-digit labels in ENUM domain name') - dlabels.reverse() - text = b''.join(dlabels) - if want_plus_prefix: - text = b'+' + text - return maybe_decode(text) - - -def query(number, domains, resolver=None): - """Look for NAPTR RRs for the specified number in the specified domains. - - e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.']) - - *number*, a ``text`` is the number to look for. - - *domains* is an iterable containing ``dns.name.Name`` values. - - *resolver*, a ``dns.resolver.Resolver``, is the resolver to use. If - ``None``, the default resolver is used. 
- """ - - if resolver is None: - resolver = dns.resolver.get_default_resolver() - e_nx = dns.resolver.NXDOMAIN() - for domain in domains: - if isinstance(domain, string_types): - domain = dns.name.from_text(domain) - qname = dns.e164.from_e164(number, domain) - try: - return resolver.query(qname, 'NAPTR') - except dns.resolver.NXDOMAIN as e: - e_nx += e - raise e_nx diff --git a/lib/dns/e164.pyi b/lib/dns/e164.pyi deleted file mode 100644 index 37a99fed..00000000 --- a/lib/dns/e164.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from typing import Optional, Iterable -from . import name, resolver -def from_e164(text : str, origin=name.Name(".")) -> name.Name: - ... - -def to_e164(name : name.Name, origin : Optional[name.Name] = None, want_plus_prefix=True) -> str: - ... - -def query(number : str, domains : Iterable[str], resolver : Optional[resolver.Resolver] = None) -> resolver.Answer: - ... diff --git a/lib/dns/edns.py b/lib/dns/edns.py deleted file mode 100644 index 5660f7bb..00000000 --- a/lib/dns/edns.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2009-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""EDNS Options""" - -from __future__ import absolute_import - -import math -import struct - -import dns.inet - -#: NSID -NSID = 3 -#: DAU -DAU = 5 -#: DHU -DHU = 6 -#: N3U -N3U = 7 -#: ECS (client-subnet) -ECS = 8 -#: EXPIRE -EXPIRE = 9 -#: COOKIE -COOKIE = 10 -#: KEEPALIVE -KEEPALIVE = 11 -#: PADDING -PADDING = 12 -#: CHAIN -CHAIN = 13 - -class Option(object): - - """Base class for all EDNS option types.""" - - def __init__(self, otype): - """Initialize an option. - - *otype*, an ``int``, is the option type. - """ - self.otype = otype - - def to_wire(self, file): - """Convert an option to wire format. - """ - raise NotImplementedError - - @classmethod - def from_wire(cls, otype, wire, current, olen): - """Build an EDNS option object from wire format. - - *otype*, an ``int``, is the option type. - - *wire*, a ``binary``, is the wire-format message. - - *current*, an ``int``, is the offset in *wire* of the beginning - of the rdata. - - *olen*, an ``int``, is the length of the wire-format option data - - Returns a ``dns.edns.Option``. - """ - - raise NotImplementedError - - def _cmp(self, other): - """Compare an EDNS option with another option of the same type. - - Returns < 0 if < *other*, 0 if == *other*, and > 0 if > *other*. 
- """ - raise NotImplementedError - - def __eq__(self, other): - if not isinstance(other, Option): - return False - if self.otype != other.otype: - return False - return self._cmp(other) == 0 - - def __ne__(self, other): - if not isinstance(other, Option): - return False - if self.otype != other.otype: - return False - return self._cmp(other) != 0 - - def __lt__(self, other): - if not isinstance(other, Option) or \ - self.otype != other.otype: - return NotImplemented - return self._cmp(other) < 0 - - def __le__(self, other): - if not isinstance(other, Option) or \ - self.otype != other.otype: - return NotImplemented - return self._cmp(other) <= 0 - - def __ge__(self, other): - if not isinstance(other, Option) or \ - self.otype != other.otype: - return NotImplemented - return self._cmp(other) >= 0 - - def __gt__(self, other): - if not isinstance(other, Option) or \ - self.otype != other.otype: - return NotImplemented - return self._cmp(other) > 0 - - -class GenericOption(Option): - - """Generic Option Class - - This class is used for EDNS option types for which we have no better - implementation. - """ - - def __init__(self, otype, data): - super(GenericOption, self).__init__(otype) - self.data = data - - def to_wire(self, file): - file.write(self.data) - - def to_text(self): - return "Generic %d" % self.otype - - @classmethod - def from_wire(cls, otype, wire, current, olen): - return cls(otype, wire[current: current + olen]) - - def _cmp(self, other): - if self.data == other.data: - return 0 - if self.data > other.data: - return 1 - return -1 - - -class ECSOption(Option): - """EDNS Client Subnet (ECS, RFC7871)""" - - def __init__(self, address, srclen=None, scopelen=0): - """*address*, a ``text``, is the client address information. - - *srclen*, an ``int``, the source prefix length, which is the - leftmost number of bits of the address to be used for the - lookup. The default is 24 for IPv4 and 56 for IPv6. - - *scopelen*, an ``int``, the scope prefix length. 
This value - must be 0 in queries, and should be set in responses. - """ - - super(ECSOption, self).__init__(ECS) - af = dns.inet.af_for_address(address) - - if af == dns.inet.AF_INET6: - self.family = 2 - if srclen is None: - srclen = 56 - elif af == dns.inet.AF_INET: - self.family = 1 - if srclen is None: - srclen = 24 - else: - raise ValueError('Bad ip family') - - self.address = address - self.srclen = srclen - self.scopelen = scopelen - - addrdata = dns.inet.inet_pton(af, address) - nbytes = int(math.ceil(srclen/8.0)) - - # Truncate to srclen and pad to the end of the last octet needed - # See RFC section 6 - self.addrdata = addrdata[:nbytes] - nbits = srclen % 8 - if nbits != 0: - last = struct.pack('B', ord(self.addrdata[-1:]) & (0xff << nbits)) - self.addrdata = self.addrdata[:-1] + last - - def to_text(self): - return "ECS {}/{} scope/{}".format(self.address, self.srclen, - self.scopelen) - - def to_wire(self, file): - file.write(struct.pack('!H', self.family)) - file.write(struct.pack('!BB', self.srclen, self.scopelen)) - file.write(self.addrdata) - - @classmethod - def from_wire(cls, otype, wire, cur, olen): - family, src, scope = struct.unpack('!HBB', wire[cur:cur+4]) - cur += 4 - - addrlen = int(math.ceil(src/8.0)) - - if family == 1: - af = dns.inet.AF_INET - pad = 4 - addrlen - elif family == 2: - af = dns.inet.AF_INET6 - pad = 16 - addrlen - else: - raise ValueError('unsupported family') - - addr = dns.inet.inet_ntop(af, wire[cur:cur+addrlen] + b'\x00' * pad) - return cls(addr, src, scope) - - def _cmp(self, other): - if self.addrdata == other.addrdata: - return 0 - if self.addrdata > other.addrdata: - return 1 - return -1 - -_type_to_class = { - ECS: ECSOption -} - -def get_option_class(otype): - """Return the class for the specified option type. - - The GenericOption class is used if a more specific class is not - known. 
- """ - - cls = _type_to_class.get(otype) - if cls is None: - cls = GenericOption - return cls - - -def option_from_wire(otype, wire, current, olen): - """Build an EDNS option object from wire format. - - *otype*, an ``int``, is the option type. - - *wire*, a ``binary``, is the wire-format message. - - *current*, an ``int``, is the offset in *wire* of the beginning - of the rdata. - - *olen*, an ``int``, is the length of the wire-format option data - - Returns an instance of a subclass of ``dns.edns.Option``. - """ - - cls = get_option_class(otype) - return cls.from_wire(otype, wire, current, olen) diff --git a/lib/dns/entropy.py b/lib/dns/entropy.py deleted file mode 100644 index 00c6a4b3..00000000 --- a/lib/dns/entropy.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2009-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import os -import random -import time -from ._compat import long, binary_type -try: - import threading as _threading -except ImportError: - import dummy_threading as _threading - - -class EntropyPool(object): - - # This is an entropy pool for Python implementations that do not - # have a working SystemRandom. 
I'm not sure there are any, but - # leaving this code doesn't hurt anything as the library code - # is used if present. - - def __init__(self, seed=None): - self.pool_index = 0 - self.digest = None - self.next_byte = 0 - self.lock = _threading.Lock() - try: - import hashlib - self.hash = hashlib.sha1() - self.hash_len = 20 - except ImportError: - try: - import sha - self.hash = sha.new() - self.hash_len = 20 - except ImportError: - import md5 # pylint: disable=import-error - self.hash = md5.new() - self.hash_len = 16 - self.pool = bytearray(b'\0' * self.hash_len) - if seed is not None: - self.stir(bytearray(seed)) - self.seeded = True - self.seed_pid = os.getpid() - else: - self.seeded = False - self.seed_pid = 0 - - def stir(self, entropy, already_locked=False): - if not already_locked: - self.lock.acquire() - try: - for c in entropy: - if self.pool_index == self.hash_len: - self.pool_index = 0 - b = c & 0xff - self.pool[self.pool_index] ^= b - self.pool_index += 1 - finally: - if not already_locked: - self.lock.release() - - def _maybe_seed(self): - if not self.seeded or self.seed_pid != os.getpid(): - try: - seed = os.urandom(16) - except Exception: - try: - r = open('/dev/urandom', 'rb', 0) - try: - seed = r.read(16) - finally: - r.close() - except Exception: - seed = str(time.time()) - self.seeded = True - self.seed_pid = os.getpid() - self.digest = None - seed = bytearray(seed) - self.stir(seed, True) - - def random_8(self): - self.lock.acquire() - try: - self._maybe_seed() - if self.digest is None or self.next_byte == self.hash_len: - self.hash.update(binary_type(self.pool)) - self.digest = bytearray(self.hash.digest()) - self.stir(self.digest, True) - self.next_byte = 0 - value = self.digest[self.next_byte] - self.next_byte += 1 - finally: - self.lock.release() - return value - - def random_16(self): - return self.random_8() * 256 + self.random_8() - - def random_32(self): - return self.random_16() * 65536 + self.random_16() - - def random_between(self, 
first, last): - size = last - first + 1 - if size > long(4294967296): - raise ValueError('too big') - if size > 65536: - rand = self.random_32 - max = long(4294967295) - elif size > 256: - rand = self.random_16 - max = 65535 - else: - rand = self.random_8 - max = 255 - return first + size * rand() // (max + 1) - -pool = EntropyPool() - -try: - system_random = random.SystemRandom() -except Exception: - system_random = None - -def random_16(): - if system_random is not None: - return system_random.randrange(0, 65536) - else: - return pool.random_16() - -def between(first, last): - if system_random is not None: - return system_random.randrange(first, last + 1) - else: - return pool.random_between(first, last) diff --git a/lib/dns/entropy.pyi b/lib/dns/entropy.pyi deleted file mode 100644 index 818f805a..00000000 --- a/lib/dns/entropy.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from typing import Optional -from random import SystemRandom - -system_random : Optional[SystemRandom] - -def random_16() -> int: - pass - -def between(first: int, last: int) -> int: - pass diff --git a/lib/dns/exception.py b/lib/dns/exception.py deleted file mode 100644 index 71ff04f1..00000000 --- a/lib/dns/exception.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Common DNS Exceptions. - -Dnspython modules may also define their own exceptions, which will -always be subclasses of ``DNSException``. -""" - -class DNSException(Exception): - """Abstract base class shared by all dnspython exceptions. - - It supports two basic modes of operation: - - a) Old/compatible mode is used if ``__init__`` was called with - empty *kwargs*. In compatible mode all *args* are passed - to the standard Python Exception class as before and all *args* are - printed by the standard ``__str__`` implementation. Class variable - ``msg`` (or doc string if ``msg`` is ``None``) is returned from ``str()`` - if *args* is empty. - - b) New/parametrized mode is used if ``__init__`` was called with - non-empty *kwargs*. - In the new mode *args* must be empty and all kwargs must match - those set in class variable ``supp_kwargs``. All kwargs are stored inside - ``self.kwargs`` and used in a new ``__str__`` implementation to construct - a formatted message based on the ``fmt`` class variable, a ``string``. - - In the simplest case it is enough to override the ``supp_kwargs`` - and ``fmt`` class variables to get nice parametrized messages. 
- """ - - msg = None # non-parametrized message - supp_kwargs = set() # accepted parameters for _fmt_kwargs (sanity check) - fmt = None # message parametrized with results from _fmt_kwargs - - def __init__(self, *args, **kwargs): - self._check_params(*args, **kwargs) - if kwargs: - self.kwargs = self._check_kwargs(**kwargs) - self.msg = str(self) - else: - self.kwargs = dict() # defined but empty for old mode exceptions - if self.msg is None: - # doc string is better implicit message than empty string - self.msg = self.__doc__ - if args: - super(DNSException, self).__init__(*args) - else: - super(DNSException, self).__init__(self.msg) - - def _check_params(self, *args, **kwargs): - """Old exceptions supported only args and not kwargs. - - For sanity we do not allow to mix old and new behavior.""" - if args or kwargs: - assert bool(args) != bool(kwargs), \ - 'keyword arguments are mutually exclusive with positional args' - - def _check_kwargs(self, **kwargs): - if kwargs: - assert set(kwargs.keys()) == self.supp_kwargs, \ - 'following set of keyword args is required: %s' % ( - self.supp_kwargs) - return kwargs - - def _fmt_kwargs(self, **kwargs): - """Format kwargs before printing them. - - Resulting dictionary has to have keys necessary for str.format call - on fmt class variable. 
- """ - fmtargs = {} - for kw, data in kwargs.items(): - if isinstance(data, (list, set)): - # convert list of <someobj> to list of str(<someobj>) - fmtargs[kw] = list(map(str, data)) - if len(fmtargs[kw]) == 1: - # remove list brackets [] from single-item lists - fmtargs[kw] = fmtargs[kw].pop() - else: - fmtargs[kw] = data - return fmtargs - - def __str__(self): - if self.kwargs and self.fmt: - # provide custom message constructed from keyword arguments - fmtargs = self._fmt_kwargs(**self.kwargs) - return self.fmt.format(**fmtargs) - else: - # print *args directly in the same way as old DNSException - return super(DNSException, self).__str__() - - -class FormError(DNSException): - """DNS message is malformed.""" - - -class SyntaxError(DNSException): - """Text input is malformed.""" - - -class UnexpectedEnd(SyntaxError): - """Text input ended unexpectedly.""" - - -class TooBig(DNSException): - """The DNS message is too big.""" - - -class Timeout(DNSException): - """The DNS operation timed out.""" - supp_kwargs = {'timeout'} - fmt = "The DNS operation timed out after {timeout} seconds" diff --git a/lib/dns/exception.pyi b/lib/dns/exception.pyi deleted file mode 100644 index 4b346cc4..00000000 --- a/lib/dns/exception.pyi +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Set, Optional, Dict - -class DNSException(Exception): - supp_kwargs : Set[str] - kwargs : Optional[Dict] - -class SyntaxError(DNSException): ... -class FormError(DNSException): ... -class Timeout(DNSException): ... diff --git a/lib/dns/flags.py b/lib/dns/flags.py deleted file mode 100644 index 0119dec7..00000000 --- a/lib/dns/flags.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS Message Flags.""" - -# Standard DNS flags - -#: Query Response -QR = 0x8000 -#: Authoritative Answer -AA = 0x0400 -#: Truncated Response -TC = 0x0200 -#: Recursion Desired -RD = 0x0100 -#: Recursion Available -RA = 0x0080 -#: Authentic Data -AD = 0x0020 -#: Checking Disabled -CD = 0x0010 - -# EDNS flags - -#: DNSSEC answer OK -DO = 0x8000 - -_by_text = { - 'QR': QR, - 'AA': AA, - 'TC': TC, - 'RD': RD, - 'RA': RA, - 'AD': AD, - 'CD': CD -} - -_edns_by_text = { - 'DO': DO -} - - -# We construct the inverse mappings programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mappings not to be true inverses. 
- -_by_value = {y: x for x, y in _by_text.items()} - -_edns_by_value = {y: x for x, y in _edns_by_text.items()} - - -def _order_flags(table): - order = list(table.items()) - order.sort() - order.reverse() - return order - -_flags_order = _order_flags(_by_value) - -_edns_flags_order = _order_flags(_edns_by_value) - - -def _from_text(text, table): - flags = 0 - tokens = text.split() - for t in tokens: - flags = flags | table[t.upper()] - return flags - - -def _to_text(flags, table, order): - text_flags = [] - for k, v in order: - if flags & k != 0: - text_flags.append(v) - return ' '.join(text_flags) - - -def from_text(text): - """Convert a space-separated list of flag text values into a flags - value. - - Returns an ``int`` - """ - - return _from_text(text, _by_text) - - -def to_text(flags): - """Convert a flags value into a space-separated list of flag text - values. - - Returns a ``text``. - """ - - return _to_text(flags, _by_value, _flags_order) - - -def edns_from_text(text): - """Convert a space-separated list of EDNS flag text values into a EDNS - flags value. - - Returns an ``int`` - """ - - return _from_text(text, _edns_by_text) - - -def edns_to_text(flags): - """Convert an EDNS flags value into a space-separated list of EDNS flag - text values. - - Returns a ``text``. - """ - - return _to_text(flags, _edns_by_value, _edns_flags_order) diff --git a/lib/dns/grange.py b/lib/dns/grange.py deleted file mode 100644 index ffe8be7c..00000000 --- a/lib/dns/grange.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2012-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS GENERATE range conversion.""" - -import dns - -def from_text(text): - """Convert the text form of a range in a ``$GENERATE`` statement to an - integer. - - *text*, a ``str``, the textual range in ``$GENERATE`` form. - - Returns a tuple of three ``int`` values ``(start, stop, step)``. - """ - - # TODO, figure out the bounds on start, stop and step. - step = 1 - cur = '' - state = 0 - # state 0 1 2 3 4 - # x - y / z - - if text and text[0] == '-': - raise dns.exception.SyntaxError("Start cannot be a negative number") - - for c in text: - if c == '-' and state == 0: - start = int(cur) - cur = '' - state = 2 - elif c == '/': - stop = int(cur) - cur = '' - state = 4 - elif c.isdigit(): - cur += c - else: - raise dns.exception.SyntaxError("Could not parse %s" % (c)) - - if state in (1, 3): - raise dns.exception.SyntaxError() - - if state == 2: - stop = int(cur) - - if state == 4: - step = int(cur) - - assert step >= 1 - assert start >= 0 - assert start <= stop - # TODO, can start == stop? - - return (start, stop, step) diff --git a/lib/dns/hash.py b/lib/dns/hash.py deleted file mode 100644 index 1713e628..00000000 --- a/lib/dns/hash.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Hashing backwards compatibility wrapper""" - -import hashlib -import warnings - -warnings.warn( - "dns.hash module will be removed in future versions. Please use hashlib instead.", - DeprecationWarning) - -hashes = {} -hashes['MD5'] = hashlib.md5 -hashes['SHA1'] = hashlib.sha1 -hashes['SHA224'] = hashlib.sha224 -hashes['SHA256'] = hashlib.sha256 -hashes['SHA384'] = hashlib.sha384 -hashes['SHA512'] = hashlib.sha512 - - -def get(algorithm): - return hashes[algorithm.upper()] diff --git a/lib/dns/inet.py b/lib/dns/inet.py deleted file mode 100644 index c8d7c1b4..00000000 --- a/lib/dns/inet.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Generic Internet address helper functions.""" - -import socket - -import dns.ipv4 -import dns.ipv6 - -from ._compat import maybe_ord - -# We assume that AF_INET is always defined. - -AF_INET = socket.AF_INET - -# AF_INET6 might not be defined in the socket module, but we need it. -# We'll try to use the socket module's value, and if it doesn't work, -# we'll use our own value. - -try: - AF_INET6 = socket.AF_INET6 -except AttributeError: - AF_INET6 = 9999 - - -def inet_pton(family, text): - """Convert the textual form of a network address into its binary form. - - *family* is an ``int``, the address family. - - *text* is a ``text``, the textual address. - - Raises ``NotImplementedError`` if the address family specified is not - implemented. - - Returns a ``binary``. - """ - - if family == AF_INET: - return dns.ipv4.inet_aton(text) - elif family == AF_INET6: - return dns.ipv6.inet_aton(text) - else: - raise NotImplementedError - - -def inet_ntop(family, address): - """Convert the binary form of a network address into its textual form. - - *family* is an ``int``, the address family. - - *address* is a ``binary``, the network address in binary form. - - Raises ``NotImplementedError`` if the address family specified is not - implemented. - - Returns a ``text``. - """ - - if family == AF_INET: - return dns.ipv4.inet_ntoa(address) - elif family == AF_INET6: - return dns.ipv6.inet_ntoa(address) - else: - raise NotImplementedError - - -def af_for_address(text): - """Determine the address family of a textual-form network address. - - *text*, a ``text``, the textual address. 
- - Raises ``ValueError`` if the address family cannot be determined - from the input. - - Returns an ``int``. - """ - - try: - dns.ipv4.inet_aton(text) - return AF_INET - except Exception: - try: - dns.ipv6.inet_aton(text) - return AF_INET6 - except: - raise ValueError - - -def is_multicast(text): - """Is the textual-form network address a multicast address? - - *text*, a ``text``, the textual address. - - Raises ``ValueError`` if the address family cannot be determined - from the input. - - Returns a ``bool``. - """ - - try: - first = maybe_ord(dns.ipv4.inet_aton(text)[0]) - return first >= 224 and first <= 239 - except Exception: - try: - first = maybe_ord(dns.ipv6.inet_aton(text)[0]) - return first == 255 - except Exception: - raise ValueError diff --git a/lib/dns/inet.pyi b/lib/dns/inet.pyi deleted file mode 100644 index 6d9dcc70..00000000 --- a/lib/dns/inet.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Union -from socket import AddressFamily - -AF_INET6 : Union[int, AddressFamily] diff --git a/lib/dns/ipv4.py b/lib/dns/ipv4.py deleted file mode 100644 index 8fc4f7dc..00000000 --- a/lib/dns/ipv4.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""IPv4 helper functions.""" - -import struct - -import dns.exception -from ._compat import binary_type - -def inet_ntoa(address): - """Convert an IPv4 address in binary form to text form. - - *address*, a ``binary``, the IPv4 address in binary form. - - Returns a ``text``. - """ - - if len(address) != 4: - raise dns.exception.SyntaxError - if not isinstance(address, bytearray): - address = bytearray(address) - return ('%u.%u.%u.%u' % (address[0], address[1], - address[2], address[3])) - -def inet_aton(text): - """Convert an IPv4 address in text form to binary form. - - *text*, a ``text``, the IPv4 address in textual form. - - Returns a ``binary``. - """ - - if not isinstance(text, binary_type): - text = text.encode() - parts = text.split(b'.') - if len(parts) != 4: - raise dns.exception.SyntaxError - for part in parts: - if not part.isdigit(): - raise dns.exception.SyntaxError - if len(part) > 1 and part[0] == '0': - # No leading zeros - raise dns.exception.SyntaxError - try: - bytes = [int(part) for part in parts] - return struct.pack('BBBB', *bytes) - except: - raise dns.exception.SyntaxError diff --git a/lib/dns/ipv6.py b/lib/dns/ipv6.py deleted file mode 100644 index 128e56c8..00000000 --- a/lib/dns/ipv6.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""IPv6 helper functions.""" - -import re -import binascii - -import dns.exception -import dns.ipv4 -from ._compat import xrange, binary_type, maybe_decode - -_leading_zero = re.compile(r'0+([0-9a-f]+)') - -def inet_ntoa(address): - """Convert an IPv6 address in binary form to text form. - - *address*, a ``binary``, the IPv6 address in binary form. - - Raises ``ValueError`` if the address isn't 16 bytes long. - Returns a ``text``. - """ - - if len(address) != 16: - raise ValueError("IPv6 addresses are 16 bytes long") - hex = binascii.hexlify(address) - chunks = [] - i = 0 - l = len(hex) - while i < l: - chunk = maybe_decode(hex[i : i + 4]) - # strip leading zeros. 
we do this with an re instead of - # with lstrip() because lstrip() didn't support chars until - # python 2.2.2 - m = _leading_zero.match(chunk) - if not m is None: - chunk = m.group(1) - chunks.append(chunk) - i += 4 - # - # Compress the longest subsequence of 0-value chunks to :: - # - best_start = 0 - best_len = 0 - start = -1 - last_was_zero = False - for i in xrange(8): - if chunks[i] != '0': - if last_was_zero: - end = i - current_len = end - start - if current_len > best_len: - best_start = start - best_len = current_len - last_was_zero = False - elif not last_was_zero: - start = i - last_was_zero = True - if last_was_zero: - end = 8 - current_len = end - start - if current_len > best_len: - best_start = start - best_len = current_len - if best_len > 1: - if best_start == 0 and \ - (best_len == 6 or - best_len == 5 and chunks[5] == 'ffff'): - # We have an embedded IPv4 address - if best_len == 6: - prefix = '::' - else: - prefix = '::ffff:' - hex = prefix + dns.ipv4.inet_ntoa(address[12:]) - else: - hex = ':'.join(chunks[:best_start]) + '::' + \ - ':'.join(chunks[best_start + best_len:]) - else: - hex = ':'.join(chunks) - return hex - -_v4_ending = re.compile(br'(.*):(\d+\.\d+\.\d+\.\d+)$') -_colon_colon_start = re.compile(br'::.*') -_colon_colon_end = re.compile(br'.*::$') - -def inet_aton(text): - """Convert an IPv6 address in text form to binary form. - - *text*, a ``text``, the IPv6 address in textual form. - - Returns a ``binary``. - """ - - # - # Our aim here is not something fast; we just want something that works. - # - if not isinstance(text, binary_type): - text = text.encode() - - if text == b'::': - text = b'0::' - # - # Get rid of the icky dot-quad syntax if we have it. 
- # - m = _v4_ending.match(text) - if not m is None: - b = bytearray(dns.ipv4.inet_aton(m.group(2))) - text = (u"{}:{:02x}{:02x}:{:02x}{:02x}".format(m.group(1).decode(), - b[0], b[1], b[2], - b[3])).encode() - # - # Try to turn '::<whatever>' into ':<whatever>'; if no match try to - # turn '<whatever>::' into '<whatever>:' - # - m = _colon_colon_start.match(text) - if not m is None: - text = text[1:] - else: - m = _colon_colon_end.match(text) - if not m is None: - text = text[:-1] - # - # Now canonicalize into 8 chunks of 4 hex digits each - # - chunks = text.split(b':') - l = len(chunks) - if l > 8: - raise dns.exception.SyntaxError - seen_empty = False - canonical = [] - for c in chunks: - if c == b'': - if seen_empty: - raise dns.exception.SyntaxError - seen_empty = True - for i in xrange(0, 8 - l + 1): - canonical.append(b'0000') - else: - lc = len(c) - if lc > 4: - raise dns.exception.SyntaxError - if lc != 4: - c = (b'0' * (4 - lc)) + c - canonical.append(c) - if l < 8 and not seen_empty: - raise dns.exception.SyntaxError - text = b''.join(canonical) - - # - # Finally we can go to binary. - # - try: - return binascii.unhexlify(text) - except (binascii.Error, TypeError): - raise dns.exception.SyntaxError - -_mapped_prefix = b'\x00' * 10 + b'\xff\xff' - -def is_mapped(address): - """Is the specified address a mapped IPv4 address? - - *address*, a ``binary`` is an IPv6 address in binary form. - - Returns a ``bool``. - """ - - return address.startswith(_mapped_prefix) diff --git a/lib/dns/message.py b/lib/dns/message.py deleted file mode 100644 index 9d2b2f43..00000000 --- a/lib/dns/message.py +++ /dev/null @@ -1,1175 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS Messages""" - -from __future__ import absolute_import - -from io import StringIO -import struct -import time - -import dns.edns -import dns.exception -import dns.flags -import dns.name -import dns.opcode -import dns.entropy -import dns.rcode -import dns.rdata -import dns.rdataclass -import dns.rdatatype -import dns.rrset -import dns.renderer -import dns.tsig -import dns.wiredata - -from ._compat import long, xrange, string_types - - -class ShortHeader(dns.exception.FormError): - """The DNS packet passed to from_wire() is too short.""" - - -class TrailingJunk(dns.exception.FormError): - """The DNS packet passed to from_wire() has extra junk at the end of it.""" - - -class UnknownHeaderField(dns.exception.DNSException): - """The header field name was not recognized when converting from text - into a message.""" - - -class BadEDNS(dns.exception.FormError): - """An OPT record occurred somewhere other than the start of - the additional data section.""" - - -class BadTSIG(dns.exception.FormError): - """A TSIG record occurred somewhere other than the end of - the additional data section.""" - - -class UnknownTSIGKey(dns.exception.DNSException): - """A TSIG with an unknown key was received.""" - - -#: The question section number 
-QUESTION = 0 - -#: The answer section number -ANSWER = 1 - -#: The authority section number -AUTHORITY = 2 - -#: The additional section number -ADDITIONAL = 3 - -class Message(object): - """A DNS message.""" - - def __init__(self, id=None): - if id is None: - self.id = dns.entropy.random_16() - else: - self.id = id - self.flags = 0 - self.question = [] - self.answer = [] - self.authority = [] - self.additional = [] - self.edns = -1 - self.ednsflags = 0 - self.payload = 0 - self.options = [] - self.request_payload = 0 - self.keyring = None - self.keyname = None - self.keyalgorithm = dns.tsig.default_algorithm - self.request_mac = b'' - self.other_data = b'' - self.tsig_error = 0 - self.fudge = 300 - self.original_id = self.id - self.mac = b'' - self.xfr = False - self.origin = None - self.tsig_ctx = None - self.had_tsig = False - self.multi = False - self.first = True - self.index = {} - - def __repr__(self): - return '<DNS message, ID ' + repr(self.id) + '>' - - def __str__(self): - return self.to_text() - - def to_text(self, origin=None, relativize=True, **kw): - """Convert the message to text. - - The *origin*, *relativize*, and any other keyword - arguments are passed to the RRset ``to_wire()`` method. - - Returns a ``text``. 
- """ - - s = StringIO() - s.write(u'id %d\n' % self.id) - s.write(u'opcode %s\n' % - dns.opcode.to_text(dns.opcode.from_flags(self.flags))) - rc = dns.rcode.from_flags(self.flags, self.ednsflags) - s.write(u'rcode %s\n' % dns.rcode.to_text(rc)) - s.write(u'flags %s\n' % dns.flags.to_text(self.flags)) - if self.edns >= 0: - s.write(u'edns %s\n' % self.edns) - if self.ednsflags != 0: - s.write(u'eflags %s\n' % - dns.flags.edns_to_text(self.ednsflags)) - s.write(u'payload %d\n' % self.payload) - for opt in self.options: - s.write(u'option %s\n' % opt.to_text()) - is_update = dns.opcode.is_update(self.flags) - if is_update: - s.write(u';ZONE\n') - else: - s.write(u';QUESTION\n') - for rrset in self.question: - s.write(rrset.to_text(origin, relativize, **kw)) - s.write(u'\n') - if is_update: - s.write(u';PREREQ\n') - else: - s.write(u';ANSWER\n') - for rrset in self.answer: - s.write(rrset.to_text(origin, relativize, **kw)) - s.write(u'\n') - if is_update: - s.write(u';UPDATE\n') - else: - s.write(u';AUTHORITY\n') - for rrset in self.authority: - s.write(rrset.to_text(origin, relativize, **kw)) - s.write(u'\n') - s.write(u';ADDITIONAL\n') - for rrset in self.additional: - s.write(rrset.to_text(origin, relativize, **kw)) - s.write(u'\n') - # - # We strip off the final \n so the caller can print the result without - # doing weird things to get around eccentricities in Python print - # formatting - # - return s.getvalue()[:-1] - - def __eq__(self, other): - """Two messages are equal if they have the same content in the - header, question, answer, and authority sections. - - Returns a ``bool``. 
- """ - - if not isinstance(other, Message): - return False - if self.id != other.id: - return False - if self.flags != other.flags: - return False - for n in self.question: - if n not in other.question: - return False - for n in other.question: - if n not in self.question: - return False - for n in self.answer: - if n not in other.answer: - return False - for n in other.answer: - if n not in self.answer: - return False - for n in self.authority: - if n not in other.authority: - return False - for n in other.authority: - if n not in self.authority: - return False - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def is_response(self, other): - """Is this message a response to *other*? - - Returns a ``bool``. - """ - - if other.flags & dns.flags.QR == 0 or \ - self.id != other.id or \ - dns.opcode.from_flags(self.flags) != \ - dns.opcode.from_flags(other.flags): - return False - if dns.rcode.from_flags(other.flags, other.ednsflags) != \ - dns.rcode.NOERROR: - return True - if dns.opcode.is_update(self.flags): - return True - for n in self.question: - if n not in other.question: - return False - for n in other.question: - if n not in self.question: - return False - return True - - def section_number(self, section): - """Return the "section number" of the specified section for use - in indexing. The question section is 0, the answer section is 1, - the authority section is 2, and the additional section is 3. - - *section* is one of the section attributes of this message. - - Raises ``ValueError`` if the section isn't known. - - Returns an ``int``. - """ - - if section is self.question: - return QUESTION - elif section is self.answer: - return ANSWER - elif section is self.authority: - return AUTHORITY - elif section is self.additional: - return ADDITIONAL - else: - raise ValueError('unknown section') - - def section_from_number(self, number): - """Return the "section number" of the specified section for use - in indexing. 
The question section is 0, the answer section is 1, - the authority section is 2, and the additional section is 3. - - *section* is one of the section attributes of this message. - - Raises ``ValueError`` if the section isn't known. - - Returns an ``int``. - """ - - if number == QUESTION: - return self.question - elif number == ANSWER: - return self.answer - elif number == AUTHORITY: - return self.authority - elif number == ADDITIONAL: - return self.additional - else: - raise ValueError('unknown section') - - def find_rrset(self, section, name, rdclass, rdtype, - covers=dns.rdatatype.NONE, deleting=None, create=False, - force_unique=False): - """Find the RRset with the given attributes in the specified section. - - *section*, an ``int`` section number, or one of the section - attributes of this message. This specifies the - the section of the message to search. For example:: - - my_message.find_rrset(my_message.answer, name, rdclass, rdtype) - my_message.find_rrset(dns.message.ANSWER, name, rdclass, rdtype) - - *name*, a ``dns.name.Name``, the name of the RRset. - - *rdclass*, an ``int``, the class of the RRset. - - *rdtype*, an ``int``, the type of the RRset. - - *covers*, an ``int`` or ``None``, the covers value of the RRset. - The default is ``None``. - - *deleting*, an ``int`` or ``None``, the deleting value of the RRset. - The default is ``None``. - - *create*, a ``bool``. If ``True``, create the RRset if it is not found. - The created RRset is appended to *section*. - - *force_unique*, a ``bool``. If ``True`` and *create* is also ``True``, - create a new RRset regardless of whether a matching RRset exists - already. The default is ``False``. This is useful when creating - DDNS Update messages, as order matters for them. - - Raises ``KeyError`` if the RRset was not found and create was - ``False``. - - Returns a ``dns.rrset.RRset object``. 
- """ - - if isinstance(section, int): - section_number = section - section = self.section_from_number(section_number) - else: - section_number = self.section_number(section) - key = (section_number, name, rdclass, rdtype, covers, deleting) - if not force_unique: - if self.index is not None: - rrset = self.index.get(key) - if rrset is not None: - return rrset - else: - for rrset in section: - if rrset.match(name, rdclass, rdtype, covers, deleting): - return rrset - if not create: - raise KeyError - rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting) - section.append(rrset) - if self.index is not None: - self.index[key] = rrset - return rrset - - def get_rrset(self, section, name, rdclass, rdtype, - covers=dns.rdatatype.NONE, deleting=None, create=False, - force_unique=False): - """Get the RRset with the given attributes in the specified section. - - If the RRset is not found, None is returned. - - *section*, an ``int`` section number, or one of the section - attributes of this message. This specifies the - the section of the message to search. For example:: - - my_message.get_rrset(my_message.answer, name, rdclass, rdtype) - my_message.get_rrset(dns.message.ANSWER, name, rdclass, rdtype) - - *name*, a ``dns.name.Name``, the name of the RRset. - - *rdclass*, an ``int``, the class of the RRset. - - *rdtype*, an ``int``, the type of the RRset. - - *covers*, an ``int`` or ``None``, the covers value of the RRset. - The default is ``None``. - - *deleting*, an ``int`` or ``None``, the deleting value of the RRset. - The default is ``None``. - - *create*, a ``bool``. If ``True``, create the RRset if it is not found. - The created RRset is appended to *section*. - - *force_unique*, a ``bool``. If ``True`` and *create* is also ``True``, - create a new RRset regardless of whether a matching RRset exists - already. The default is ``False``. This is useful when creating - DDNS Update messages, as order matters for them. 
- - Returns a ``dns.rrset.RRset object`` or ``None``. - """ - - try: - rrset = self.find_rrset(section, name, rdclass, rdtype, covers, - deleting, create, force_unique) - except KeyError: - rrset = None - return rrset - - def to_wire(self, origin=None, max_size=0, **kw): - """Return a string containing the message in DNS compressed wire - format. - - Additional keyword arguments are passed to the RRset ``to_wire()`` - method. - - *origin*, a ``dns.name.Name`` or ``None``, the origin to be appended - to any relative names. - - *max_size*, an ``int``, the maximum size of the wire format - output; default is 0, which means "the message's request - payload, if nonzero, or 65535". - - Raises ``dns.exception.TooBig`` if *max_size* was exceeded. - - Returns a ``binary``. - """ - - if max_size == 0: - if self.request_payload != 0: - max_size = self.request_payload - else: - max_size = 65535 - if max_size < 512: - max_size = 512 - elif max_size > 65535: - max_size = 65535 - r = dns.renderer.Renderer(self.id, self.flags, max_size, origin) - for rrset in self.question: - r.add_question(rrset.name, rrset.rdtype, rrset.rdclass) - for rrset in self.answer: - r.add_rrset(dns.renderer.ANSWER, rrset, **kw) - for rrset in self.authority: - r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw) - if self.edns >= 0: - r.add_edns(self.edns, self.ednsflags, self.payload, self.options) - for rrset in self.additional: - r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw) - r.write_header() - if self.keyname is not None: - r.add_tsig(self.keyname, self.keyring[self.keyname], - self.fudge, self.original_id, self.tsig_error, - self.other_data, self.request_mac, - self.keyalgorithm) - self.mac = r.mac - return r.get_wire() - - def use_tsig(self, keyring, keyname=None, fudge=300, - original_id=None, tsig_error=0, other_data=b'', - algorithm=dns.tsig.default_algorithm): - """When sending, a TSIG signature using the specified keyring - and keyname should be added. 
- - See the documentation of the Message class for a complete - description of the keyring dictionary. - - *keyring*, a ``dict``, the TSIG keyring to use. If a - *keyring* is specified but a *keyname* is not, then the key - used will be the first key in the *keyring*. Note that the - order of keys in a dictionary is not defined, so applications - should supply a keyname when a keyring is used, unless they - know the keyring contains only one key. - - *keyname*, a ``dns.name.Name`` or ``None``, the name of the TSIG key - to use; defaults to ``None``. The key must be defined in the keyring. - - *fudge*, an ``int``, the TSIG time fudge. - - *original_id*, an ``int``, the TSIG original id. If ``None``, - the message's id is used. - - *tsig_error*, an ``int``, the TSIG error code. - - *other_data*, a ``binary``, the TSIG other data. - - *algorithm*, a ``dns.name.Name``, the TSIG algorithm to use. - """ - - self.keyring = keyring - if keyname is None: - self.keyname = list(self.keyring.keys())[0] - else: - if isinstance(keyname, string_types): - keyname = dns.name.from_text(keyname) - self.keyname = keyname - self.keyalgorithm = algorithm - self.fudge = fudge - if original_id is None: - self.original_id = self.id - else: - self.original_id = original_id - self.tsig_error = tsig_error - self.other_data = other_data - - def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, - options=None): - """Configure EDNS behavior. - - *edns*, an ``int``, is the EDNS level to use. Specifying - ``None``, ``False``, or ``-1`` means "do not use EDNS", and in this case - the other parameters are ignored. Specifying ``True`` is - equivalent to specifying 0, i.e. "use EDNS0". - - *ednsflags*, an ``int``, the EDNS flag values. - - *payload*, an ``int``, is the EDNS sender's payload field, which is the - maximum size of UDP datagram the sender can handle. I.e. how big - a response to this message can be. 
- - *request_payload*, an ``int``, is the EDNS payload size to use when - sending this message. If not specified, defaults to the value of - *payload*. - - *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS - options. - """ - - if edns is None or edns is False: - edns = -1 - if edns is True: - edns = 0 - if request_payload is None: - request_payload = payload - if edns < 0: - ednsflags = 0 - payload = 0 - request_payload = 0 - options = [] - else: - # make sure the EDNS version in ednsflags agrees with edns - ednsflags &= long(0xFF00FFFF) - ednsflags |= (edns << 16) - if options is None: - options = [] - self.edns = edns - self.ednsflags = ednsflags - self.payload = payload - self.options = options - self.request_payload = request_payload - - def want_dnssec(self, wanted=True): - """Enable or disable 'DNSSEC desired' flag in requests. - - *wanted*, a ``bool``. If ``True``, then DNSSEC data is - desired in the response, EDNS is enabled if required, and then - the DO bit is set. If ``False``, the DO bit is cleared if - EDNS is enabled. - """ - - if wanted: - if self.edns < 0: - self.use_edns() - self.ednsflags |= dns.flags.DO - elif self.edns >= 0: - self.ednsflags &= ~dns.flags.DO - - def rcode(self): - """Return the rcode. - - Returns an ``int``. - """ - return dns.rcode.from_flags(self.flags, self.ednsflags) - - def set_rcode(self, rcode): - """Set the rcode. - - *rcode*, an ``int``, is the rcode to set. - """ - (value, evalue) = dns.rcode.to_flags(rcode) - self.flags &= 0xFFF0 - self.flags |= value - self.ednsflags &= long(0x00FFFFFF) - self.ednsflags |= evalue - if self.ednsflags != 0 and self.edns < 0: - self.edns = 0 - - def opcode(self): - """Return the opcode. - - Returns an ``int``. - """ - return dns.opcode.from_flags(self.flags) - - def set_opcode(self, opcode): - """Set the opcode. - - *opcode*, an ``int``, is the opcode to set. 
- """ - self.flags &= 0x87FF - self.flags |= dns.opcode.to_flags(opcode) - - -class _WireReader(object): - - """Wire format reader. - - wire: a binary, is the wire-format message. - message: The message object being built - current: When building a message object from wire format, this - variable contains the offset from the beginning of wire of the next octet - to be read. - updating: Is the message a dynamic update? - one_rr_per_rrset: Put each RR into its own RRset? - ignore_trailing: Ignore trailing junk at end of request? - zone_rdclass: The class of the zone in messages which are - DNS dynamic updates. - """ - - def __init__(self, wire, message, question_only=False, - one_rr_per_rrset=False, ignore_trailing=False): - self.wire = dns.wiredata.maybe_wrap(wire) - self.message = message - self.current = 0 - self.updating = False - self.zone_rdclass = dns.rdataclass.IN - self.question_only = question_only - self.one_rr_per_rrset = one_rr_per_rrset - self.ignore_trailing = ignore_trailing - - def _get_question(self, qcount): - """Read the next *qcount* records from the wire data and add them to - the question section. - """ - - if self.updating and qcount > 1: - raise dns.exception.FormError - - for i in xrange(0, qcount): - (qname, used) = dns.name.from_wire(self.wire, self.current) - if self.message.origin is not None: - qname = qname.relativize(self.message.origin) - self.current = self.current + used - (rdtype, rdclass) = \ - struct.unpack('!HH', - self.wire[self.current:self.current + 4]) - self.current = self.current + 4 - self.message.find_rrset(self.message.question, qname, - rdclass, rdtype, create=True, - force_unique=True) - if self.updating: - self.zone_rdclass = rdclass - - def _get_section(self, section, count): - """Read the next I{count} records from the wire data and add them to - the specified section. 
- - section: the section of the message to which to add records - count: the number of records to read - """ - - if self.updating or self.one_rr_per_rrset: - force_unique = True - else: - force_unique = False - seen_opt = False - for i in xrange(0, count): - rr_start = self.current - (name, used) = dns.name.from_wire(self.wire, self.current) - absolute_name = name - if self.message.origin is not None: - name = name.relativize(self.message.origin) - self.current = self.current + used - (rdtype, rdclass, ttl, rdlen) = \ - struct.unpack('!HHIH', - self.wire[self.current:self.current + 10]) - self.current = self.current + 10 - if rdtype == dns.rdatatype.OPT: - if section is not self.message.additional or seen_opt: - raise BadEDNS - self.message.payload = rdclass - self.message.ednsflags = ttl - self.message.edns = (ttl & 0xff0000) >> 16 - self.message.options = [] - current = self.current - optslen = rdlen - while optslen > 0: - (otype, olen) = \ - struct.unpack('!HH', - self.wire[current:current + 4]) - current = current + 4 - opt = dns.edns.option_from_wire( - otype, self.wire, current, olen) - self.message.options.append(opt) - current = current + olen - optslen = optslen - 4 - olen - seen_opt = True - elif rdtype == dns.rdatatype.TSIG: - if not (section is self.message.additional and - i == (count - 1)): - raise BadTSIG - if self.message.keyring is None: - raise UnknownTSIGKey('got signed message without keyring') - secret = self.message.keyring.get(absolute_name) - if secret is None: - raise UnknownTSIGKey("key '%s' unknown" % name) - self.message.keyname = absolute_name - (self.message.keyalgorithm, self.message.mac) = \ - dns.tsig.get_algorithm_and_mac(self.wire, self.current, - rdlen) - self.message.tsig_ctx = \ - dns.tsig.validate(self.wire, - absolute_name, - secret, - int(time.time()), - self.message.request_mac, - rr_start, - self.current, - rdlen, - self.message.tsig_ctx, - self.message.multi, - self.message.first) - self.message.had_tsig = True - else: - 
if ttl < 0: - ttl = 0 - if self.updating and \ - (rdclass == dns.rdataclass.ANY or - rdclass == dns.rdataclass.NONE): - deleting = rdclass - rdclass = self.zone_rdclass - else: - deleting = None - if deleting == dns.rdataclass.ANY or \ - (deleting == dns.rdataclass.NONE and - section is self.message.answer): - covers = dns.rdatatype.NONE - rd = None - else: - rd = dns.rdata.from_wire(rdclass, rdtype, self.wire, - self.current, rdlen, - self.message.origin) - covers = rd.covers() - if self.message.xfr and rdtype == dns.rdatatype.SOA: - force_unique = True - rrset = self.message.find_rrset(section, name, - rdclass, rdtype, covers, - deleting, True, force_unique) - if rd is not None: - rrset.add(rd, ttl) - self.current = self.current + rdlen - - def read(self): - """Read a wire format DNS message and build a dns.message.Message - object.""" - - l = len(self.wire) - if l < 12: - raise ShortHeader - (self.message.id, self.message.flags, qcount, ancount, - aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12]) - self.current = 12 - if dns.opcode.is_update(self.message.flags): - self.updating = True - self._get_question(qcount) - if self.question_only: - return - self._get_section(self.message.answer, ancount) - self._get_section(self.message.authority, aucount) - self._get_section(self.message.additional, adcount) - if not self.ignore_trailing and self.current != l: - raise TrailingJunk - if self.message.multi and self.message.tsig_ctx and \ - not self.message.had_tsig: - self.message.tsig_ctx.update(self.wire) - - -def from_wire(wire, keyring=None, request_mac=b'', xfr=False, origin=None, - tsig_ctx=None, multi=False, first=True, - question_only=False, one_rr_per_rrset=False, - ignore_trailing=False): - """Convert a DNS wire format message into a message - object. - - *keyring*, a ``dict``, the keyring to use if the message is signed. - - *request_mac*, a ``binary``. 
If the message is a response to a - TSIG-signed request, *request_mac* should be set to the MAC of - that request. - - *xfr*, a ``bool``, should be set to ``True`` if this message is part of - a zone transfer. - - *origin*, a ``dns.name.Name`` or ``None``. If the message is part - of a zone transfer, *origin* should be the origin name of the - zone. - - *tsig_ctx*, a ``hmac.HMAC`` objext, the ongoing TSIG context, used - when validating zone transfers. - - *multi*, a ``bool``, should be set to ``True`` if this message - part of a multiple message sequence. - - *first*, a ``bool``, should be set to ``True`` if this message is - stand-alone, or the first message in a multi-message sequence. - - *question_only*, a ``bool``. If ``True``, read only up to - the end of the question section. - - *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its - own RRset. - - *ignore_trailing*, a ``bool``. If ``True``, ignore trailing - junk at end of the message. - - Raises ``dns.message.ShortHeader`` if the message is less than 12 octets - long. - - Raises ``dns.messaage.TrailingJunk`` if there were octets in the message - past the end of the proper DNS message, and *ignore_trailing* is ``False``. - - Raises ``dns.message.BadEDNS`` if an OPT record was in the - wrong section, or occurred more than once. - - Raises ``dns.message.BadTSIG`` if a TSIG record was not the last - record of the additional data section. - - Returns a ``dns.message.Message``. - """ - - m = Message(id=0) - m.keyring = keyring - m.request_mac = request_mac - m.xfr = xfr - m.origin = origin - m.tsig_ctx = tsig_ctx - m.multi = multi - m.first = first - - reader = _WireReader(wire, m, question_only, one_rr_per_rrset, - ignore_trailing) - reader.read() - - return m - - -class _TextReader(object): - - """Text format reader. - - tok: the tokenizer. - message: The message object being built. - updating: Is the message a dynamic update? 
- zone_rdclass: The class of the zone in messages which are - DNS dynamic updates. - last_name: The most recently read name when building a message object. - """ - - def __init__(self, text, message): - self.message = message - self.tok = dns.tokenizer.Tokenizer(text) - self.last_name = None - self.zone_rdclass = dns.rdataclass.IN - self.updating = False - - def _header_line(self, section): - """Process one line from the text format header section.""" - - token = self.tok.get() - what = token.value - if what == 'id': - self.message.id = self.tok.get_int() - elif what == 'flags': - while True: - token = self.tok.get() - if not token.is_identifier(): - self.tok.unget(token) - break - self.message.flags = self.message.flags | \ - dns.flags.from_text(token.value) - if dns.opcode.is_update(self.message.flags): - self.updating = True - elif what == 'edns': - self.message.edns = self.tok.get_int() - self.message.ednsflags = self.message.ednsflags | \ - (self.message.edns << 16) - elif what == 'eflags': - if self.message.edns < 0: - self.message.edns = 0 - while True: - token = self.tok.get() - if not token.is_identifier(): - self.tok.unget(token) - break - self.message.ednsflags = self.message.ednsflags | \ - dns.flags.edns_from_text(token.value) - elif what == 'payload': - self.message.payload = self.tok.get_int() - if self.message.edns < 0: - self.message.edns = 0 - elif what == 'opcode': - text = self.tok.get_string() - self.message.flags = self.message.flags | \ - dns.opcode.to_flags(dns.opcode.from_text(text)) - elif what == 'rcode': - text = self.tok.get_string() - self.message.set_rcode(dns.rcode.from_text(text)) - else: - raise UnknownHeaderField - self.tok.get_eol() - - def _question_line(self, section): - """Process one line from the text format question section.""" - - token = self.tok.get(want_leading=True) - if not token.is_whitespace(): - self.last_name = dns.name.from_text(token.value, None) - name = self.last_name - token = self.tok.get() - if not 
token.is_identifier(): - raise dns.exception.SyntaxError - # Class - try: - rdclass = dns.rdataclass.from_text(token.value) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except dns.exception.SyntaxError: - raise dns.exception.SyntaxError - except Exception: - rdclass = dns.rdataclass.IN - # Type - rdtype = dns.rdatatype.from_text(token.value) - self.message.find_rrset(self.message.question, name, - rdclass, rdtype, create=True, - force_unique=True) - if self.updating: - self.zone_rdclass = rdclass - self.tok.get_eol() - - def _rr_line(self, section): - """Process one line from the text format answer, authority, or - additional data sections. - """ - - deleting = None - # Name - token = self.tok.get(want_leading=True) - if not token.is_whitespace(): - self.last_name = dns.name.from_text(token.value, None) - name = self.last_name - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - # TTL - try: - ttl = int(token.value, 0) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except dns.exception.SyntaxError: - raise dns.exception.SyntaxError - except Exception: - ttl = 0 - # Class - try: - rdclass = dns.rdataclass.from_text(token.value) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE: - deleting = rdclass - rdclass = self.zone_rdclass - except dns.exception.SyntaxError: - raise dns.exception.SyntaxError - except Exception: - rdclass = dns.rdataclass.IN - # Type - rdtype = dns.rdatatype.from_text(token.value) - token = self.tok.get() - if not token.is_eol_or_eof(): - self.tok.unget(token) - rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None) - covers = rd.covers() - else: - rd = None - covers = dns.rdatatype.NONE - rrset = self.message.find_rrset(section, name, - rdclass, rdtype, covers, - deleting, True, self.updating) - if rd 
is not None: - rrset.add(rd, ttl) - - def read(self): - """Read a text format DNS message and build a dns.message.Message - object.""" - - line_method = self._header_line - section = None - while 1: - token = self.tok.get(True, True) - if token.is_eol_or_eof(): - break - if token.is_comment(): - u = token.value.upper() - if u == 'HEADER': - line_method = self._header_line - elif u == 'QUESTION' or u == 'ZONE': - line_method = self._question_line - section = self.message.question - elif u == 'ANSWER' or u == 'PREREQ': - line_method = self._rr_line - section = self.message.answer - elif u == 'AUTHORITY' or u == 'UPDATE': - line_method = self._rr_line - section = self.message.authority - elif u == 'ADDITIONAL': - line_method = self._rr_line - section = self.message.additional - self.tok.get_eol() - continue - self.tok.unget(token) - line_method(section) - - -def from_text(text): - """Convert the text format message into a message object. - - *text*, a ``text``, the text format message. - - Raises ``dns.message.UnknownHeaderField`` if a header is unknown. - - Raises ``dns.exception.SyntaxError`` if the text is badly formed. - - Returns a ``dns.message.Message object`` - """ - - # 'text' can also be a file, but we don't publish that fact - # since it's an implementation detail. The official file - # interface is from_file(). - - m = Message() - - reader = _TextReader(text, m) - reader.read() - - return m - - -def from_file(f): - """Read the next text format message from the specified file. - - *f*, a ``file`` or ``text``. If *f* is text, it is treated as the - pathname of a file to open. - - Raises ``dns.message.UnknownHeaderField`` if a header is unknown. - - Raises ``dns.exception.SyntaxError`` if the text is badly formed. 
- - Returns a ``dns.message.Message object`` - """ - - str_type = string_types - opts = 'rU' - - if isinstance(f, str_type): - f = open(f, opts) - want_close = True - else: - want_close = False - - try: - m = from_text(f) - finally: - if want_close: - f.close() - return m - - -def make_query(qname, rdtype, rdclass=dns.rdataclass.IN, use_edns=None, - want_dnssec=False, ednsflags=None, payload=None, - request_payload=None, options=None): - """Make a query message. - - The query name, type, and class may all be specified either - as objects of the appropriate type, or as strings. - - The query will have a randomly chosen query id, and its DNS flags - will be set to dns.flags.RD. - - qname, a ``dns.name.Name`` or ``text``, the query name. - - *rdtype*, an ``int`` or ``text``, the desired rdata type. - - *rdclass*, an ``int`` or ``text``, the desired rdata class; the default - is class IN. - - *use_edns*, an ``int``, ``bool`` or ``None``. The EDNS level to use; the - default is None (no EDNS). - See the description of dns.message.Message.use_edns() for the possible - values for use_edns and their meanings. - - *want_dnssec*, a ``bool``. If ``True``, DNSSEC data is desired. - - *ednsflags*, an ``int``, the EDNS flag values. - - *payload*, an ``int``, is the EDNS sender's payload field, which is the - maximum size of UDP datagram the sender can handle. I.e. how big - a response to this message can be. - - *request_payload*, an ``int``, is the EDNS payload size to use when - sending this message. If not specified, defaults to the value of - *payload*. - - *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS - options. 
- - Returns a ``dns.message.Message`` - """ - - if isinstance(qname, string_types): - qname = dns.name.from_text(qname) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if isinstance(rdclass, string_types): - rdclass = dns.rdataclass.from_text(rdclass) - m = Message() - m.flags |= dns.flags.RD - m.find_rrset(m.question, qname, rdclass, rdtype, create=True, - force_unique=True) - # only pass keywords on to use_edns if they have been set to a - # non-None value. Setting a field will turn EDNS on if it hasn't - # been configured. - kwargs = {} - if ednsflags is not None: - kwargs['ednsflags'] = ednsflags - if use_edns is None: - use_edns = 0 - if payload is not None: - kwargs['payload'] = payload - if use_edns is None: - use_edns = 0 - if request_payload is not None: - kwargs['request_payload'] = request_payload - if use_edns is None: - use_edns = 0 - if options is not None: - kwargs['options'] = options - if use_edns is None: - use_edns = 0 - kwargs['edns'] = use_edns - m.use_edns(**kwargs) - m.want_dnssec(want_dnssec) - return m - - -def make_response(query, recursion_available=False, our_payload=8192, - fudge=300): - """Make a message which is a response for the specified query. - The message returned is really a response skeleton; it has all - of the infrastructure required of a response, but none of the - content. - - The response's question section is a shallow copy of the query's - question section, so the query's question RRsets should not be - changed. - - *query*, a ``dns.message.Message``, the query to respond to. - - *recursion_available*, a ``bool``, should RA be set in the response? - - *our_payload*, an ``int``, the payload size to advertise in EDNS - responses. - - *fudge*, an ``int``, the TSIG time fudge. - - Returns a ``dns.message.Message`` object. 
- """ - - if query.flags & dns.flags.QR: - raise dns.exception.FormError('specified query message is not a query') - response = dns.message.Message(query.id) - response.flags = dns.flags.QR | (query.flags & dns.flags.RD) - if recursion_available: - response.flags |= dns.flags.RA - response.set_opcode(query.opcode()) - response.question = list(query.question) - if query.edns >= 0: - response.use_edns(0, 0, our_payload, query.payload) - if query.had_tsig: - response.use_tsig(query.keyring, query.keyname, fudge, None, 0, b'', - query.keyalgorithm) - response.request_mac = query.mac - return response diff --git a/lib/dns/message.pyi b/lib/dns/message.pyi deleted file mode 100644 index ed99b3c0..00000000 --- a/lib/dns/message.pyi +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Optional, Dict, List, Tuple, Union -from . import name, rrset, tsig, rdatatype, entropy, edns, rdataclass -import hmac - -class Message: - def to_wire(self, origin : Optional[name.Name]=None, max_size=0, **kw) -> bytes: - ... - def find_rrset(self, section : List[rrset.RRset], name : name.Name, rdclass : int, rdtype : int, - covers=rdatatype.NONE, deleting : Optional[int]=None, create=False, - force_unique=False) -> rrset.RRset: - ... 
- def __init__(self, id : Optional[int] =None) -> None: - self.id : int - self.flags = 0 - self.question : List[rrset.RRset] = [] - self.answer : List[rrset.RRset] = [] - self.authority : List[rrset.RRset] = [] - self.additional : List[rrset.RRset] = [] - self.edns = -1 - self.ednsflags = 0 - self.payload = 0 - self.options : List[edns.Option] = [] - self.request_payload = 0 - self.keyring = None - self.keyname = None - self.keyalgorithm = tsig.default_algorithm - self.request_mac = b'' - self.other_data = b'' - self.tsig_error = 0 - self.fudge = 300 - self.original_id = self.id - self.mac = b'' - self.xfr = False - self.origin = None - self.tsig_ctx = None - self.had_tsig = False - self.multi = False - self.first = True - self.index : Dict[Tuple[rrset.RRset, name.Name, int, int, Union[int,str], int], rrset.RRset] = {} -def from_text(a : str) -> Message: - ... - -def from_wire(wire, keyring : Optional[Dict[name.Name,bytes]] = None, request_mac = b'', xfr=False, origin=None, - tsig_ctx : Optional[hmac.HMAC] = None, multi=False, first=True, - question_only=False, one_rr_per_rrset=False, - ignore_trailing=False) -> Message: - ... -def make_response(query : Message, recursion_available=False, our_payload=8192, - fudge=300) -> Message: - ... - -def make_query(qname : Union[name.Name,str], rdtype : Union[str,int], rdclass : Union[int,str] =rdataclass.IN, use_edns : Optional[bool] = None, - want_dnssec=False, ednsflags : Optional[int] = None, payload : Optional[int] = None, - request_payload : Optional[int] = None, options : Optional[List[edns.Option]] = None) -> Message: - ... diff --git a/lib/dns/name.py b/lib/dns/name.py deleted file mode 100644 index 0bcfd834..00000000 --- a/lib/dns/name.py +++ /dev/null @@ -1,994 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS Names. -""" - -from io import BytesIO -import struct -import sys -import copy -import encodings.idna -try: - import idna - have_idna_2008 = True -except ImportError: - have_idna_2008 = False - -import dns.exception -import dns.wiredata - -from ._compat import long, binary_type, text_type, unichr, maybe_decode - -try: - maxint = sys.maxint # pylint: disable=sys-max-int -except AttributeError: - maxint = (1 << (8 * struct.calcsize("P"))) // 2 - 1 - - -# fullcompare() result values - -#: The compared names have no relationship to each other. -NAMERELN_NONE = 0 -#: the first name is a superdomain of the second. -NAMERELN_SUPERDOMAIN = 1 -#: The first name is a subdomain of the second. -NAMERELN_SUBDOMAIN = 2 -#: The compared names are equal. -NAMERELN_EQUAL = 3 -#: The compared names have a common ancestor. 
-NAMERELN_COMMONANCESTOR = 4 - - -class EmptyLabel(dns.exception.SyntaxError): - """A DNS label is empty.""" - - -class BadEscape(dns.exception.SyntaxError): - """An escaped code in a text format of DNS name is invalid.""" - - -class BadPointer(dns.exception.FormError): - """A DNS compression pointer points forward instead of backward.""" - - -class BadLabelType(dns.exception.FormError): - """The label type in DNS name wire format is unknown.""" - - -class NeedAbsoluteNameOrOrigin(dns.exception.DNSException): - """An attempt was made to convert a non-absolute name to - wire when there was also a non-absolute (or missing) origin.""" - - -class NameTooLong(dns.exception.FormError): - """A DNS name is > 255 octets long.""" - - -class LabelTooLong(dns.exception.SyntaxError): - """A DNS label is > 63 octets long.""" - - -class AbsoluteConcatenation(dns.exception.DNSException): - """An attempt was made to append anything other than the - empty name to an absolute DNS name.""" - - -class NoParent(dns.exception.DNSException): - """An attempt was made to get the parent of the root name - or the empty name.""" - -class NoIDNA2008(dns.exception.DNSException): - """IDNA 2008 processing was requested but the idna module is not - available.""" - - -class IDNAException(dns.exception.DNSException): - """IDNA processing raised an exception.""" - - supp_kwargs = {'idna_exception'} - fmt = "IDNA processing exception: {idna_exception}" - - -class IDNACodec(object): - """Abstract base class for IDNA encoder/decoders.""" - - def __init__(self): - pass - - def encode(self, label): - raise NotImplementedError - - def decode(self, label): - # We do not apply any IDNA policy on decode; we just - downcased = label.lower() - if downcased.startswith(b'xn--'): - try: - label = downcased[4:].decode('punycode') - except Exception as e: - raise IDNAException(idna_exception=e) - else: - label = maybe_decode(label) - return _escapify(label, True) - - -class IDNA2003Codec(IDNACodec): - """IDNA 2003 
encoder/decoder.""" - - def __init__(self, strict_decode=False): - """Initialize the IDNA 2003 encoder/decoder. - - *strict_decode* is a ``bool``. If `True`, then IDNA2003 checking - is done when decoding. This can cause failures if the name - was encoded with IDNA2008. The default is `False`. - """ - - super(IDNA2003Codec, self).__init__() - self.strict_decode = strict_decode - - def encode(self, label): - """Encode *label*.""" - - if label == '': - return b'' - try: - return encodings.idna.ToASCII(label) - except UnicodeError: - raise LabelTooLong - - def decode(self, label): - """Decode *label*.""" - if not self.strict_decode: - return super(IDNA2003Codec, self).decode(label) - if label == b'': - return u'' - try: - return _escapify(encodings.idna.ToUnicode(label), True) - except Exception as e: - raise IDNAException(idna_exception=e) - - -class IDNA2008Codec(IDNACodec): - """IDNA 2008 encoder/decoder. - - *uts_46* is a ``bool``. If True, apply Unicode IDNA - compatibility processing as described in Unicode Technical - Standard #46 (http://unicode.org/reports/tr46/). - If False, do not apply the mapping. The default is False. - - *transitional* is a ``bool``: If True, use the - "transitional" mode described in Unicode Technical Standard - #46. The default is False. - - *allow_pure_ascii* is a ``bool``. If True, then a label which - consists of only ASCII characters is allowed. This is less - strict than regular IDNA 2008, but is also necessary for mixed - names, e.g. a name with starting with "_sip._tcp." and ending - in an IDN suffix which would otherwise be disallowed. The - default is False. - - *strict_decode* is a ``bool``: If True, then IDNA2008 checking - is done when decoding. This can cause failures if the name - was encoded with IDNA2003. The default is False. 
- """ - - def __init__(self, uts_46=False, transitional=False, - allow_pure_ascii=False, strict_decode=False): - """Initialize the IDNA 2008 encoder/decoder.""" - super(IDNA2008Codec, self).__init__() - self.uts_46 = uts_46 - self.transitional = transitional - self.allow_pure_ascii = allow_pure_ascii - self.strict_decode = strict_decode - - def is_all_ascii(self, label): - for c in label: - if ord(c) > 0x7f: - return False - return True - - def encode(self, label): - if label == '': - return b'' - if self.allow_pure_ascii and self.is_all_ascii(label): - return label.encode('ascii') - if not have_idna_2008: - raise NoIDNA2008 - try: - if self.uts_46: - label = idna.uts46_remap(label, False, self.transitional) - return idna.alabel(label) - except idna.IDNAError as e: - raise IDNAException(idna_exception=e) - - def decode(self, label): - if not self.strict_decode: - return super(IDNA2008Codec, self).decode(label) - if label == b'': - return u'' - if not have_idna_2008: - raise NoIDNA2008 - try: - if self.uts_46: - label = idna.uts46_remap(label, False, False) - return _escapify(idna.ulabel(label), True) - except idna.IDNAError as e: - raise IDNAException(idna_exception=e) - -_escaped = bytearray(b'"().;\\@$') - -IDNA_2003_Practical = IDNA2003Codec(False) -IDNA_2003_Strict = IDNA2003Codec(True) -IDNA_2003 = IDNA_2003_Practical -IDNA_2008_Practical = IDNA2008Codec(True, False, True, False) -IDNA_2008_UTS_46 = IDNA2008Codec(True, False, False, False) -IDNA_2008_Strict = IDNA2008Codec(False, False, False, True) -IDNA_2008_Transitional = IDNA2008Codec(True, True, False, False) -IDNA_2008 = IDNA_2008_Practical - -def _escapify(label, unicode_mode=False): - """Escape the characters in label which need it. 
- @param unicode_mode: escapify only special and whitespace (<= 0x20) - characters - @returns: the escaped string - @rtype: string""" - if not unicode_mode: - text = '' - if isinstance(label, text_type): - label = label.encode() - for c in bytearray(label): - if c in _escaped: - text += '\\' + chr(c) - elif c > 0x20 and c < 0x7F: - text += chr(c) - else: - text += '\\%03d' % c - return text.encode() - - text = u'' - if isinstance(label, binary_type): - label = label.decode() - for c in label: - if c > u'\x20' and c < u'\x7f': - text += c - else: - if c >= u'\x7f': - text += c - else: - text += u'\\%03d' % ord(c) - return text - -def _validate_labels(labels): - """Check for empty labels in the middle of a label sequence, - labels that are too long, and for too many labels. - - Raises ``dns.name.NameTooLong`` if the name as a whole is too long. - - Raises ``dns.name.EmptyLabel`` if a label is empty (i.e. the root - label) and appears in a position other than the end of the label - sequence - - """ - - l = len(labels) - total = 0 - i = -1 - j = 0 - for label in labels: - ll = len(label) - total += ll + 1 - if ll > 63: - raise LabelTooLong - if i < 0 and label == b'': - i = j - j += 1 - if total > 255: - raise NameTooLong - if i >= 0 and i != l - 1: - raise EmptyLabel - - -def _maybe_convert_to_binary(label): - """If label is ``text``, convert it to ``binary``. If it is already - ``binary`` just return it. - - """ - - if isinstance(label, binary_type): - return label - if isinstance(label, text_type): - return label.encode() - raise ValueError - - -class Name(object): - - """A DNS name. - - The dns.name.Name class represents a DNS name as a tuple of - labels. Each label is a `binary` in DNS wire format. Instances - of the class are immutable. - """ - - __slots__ = ['labels'] - - def __init__(self, labels): - """*labels* is any iterable whose values are ``text`` or ``binary``. 
- """ - - labels = [_maybe_convert_to_binary(x) for x in labels] - super(Name, self).__setattr__('labels', tuple(labels)) - _validate_labels(self.labels) - - def __setattr__(self, name, value): - # Names are immutable - raise TypeError("object doesn't support attribute assignment") - - def __copy__(self): - return Name(self.labels) - - def __deepcopy__(self, memo): - return Name(copy.deepcopy(self.labels, memo)) - - def __getstate__(self): - # Names can be pickled - return {'labels': self.labels} - - def __setstate__(self, state): - super(Name, self).__setattr__('labels', state['labels']) - _validate_labels(self.labels) - - def is_absolute(self): - """Is the most significant label of this name the root label? - - Returns a ``bool``. - """ - - return len(self.labels) > 0 and self.labels[-1] == b'' - - def is_wild(self): - """Is this name wild? (I.e. Is the least significant label '*'?) - - Returns a ``bool``. - """ - - return len(self.labels) > 0 and self.labels[0] == b'*' - - def __hash__(self): - """Return a case-insensitive hash of the name. - - Returns an ``int``. - """ - - h = long(0) - for label in self.labels: - for c in bytearray(label.lower()): - h += (h << 3) + c - return int(h % maxint) - - def fullcompare(self, other): - """Compare two names, returning a 3-tuple - ``(relation, order, nlabels)``. - - *relation* describes the relation ship between the names, - and is one of: ``dns.name.NAMERELN_NONE``, - ``dns.name.NAMERELN_SUPERDOMAIN``, ``dns.name.NAMERELN_SUBDOMAIN``, - ``dns.name.NAMERELN_EQUAL``, or ``dns.name.NAMERELN_COMMONANCESTOR``. - - *order* is < 0 if *self* < *other*, > 0 if *self* > *other*, and == - 0 if *self* == *other*. A relative name is always less than an - absolute name. If both names have the same relativity, then - the DNSSEC order relation is used to order them. - - *nlabels* is the number of significant labels that the two names - have in common. - - Here are some examples. Names ending in "." 
are absolute names, - those not ending in "." are relative names. - - ============= ============= =========== ===== ======= - self other relation order nlabels - ============= ============= =========== ===== ======= - www.example. www.example. equal 0 3 - www.example. example. subdomain > 0 2 - example. www.example. superdomain < 0 2 - example1.com. example2.com. common anc. < 0 2 - example1 example2. none < 0 0 - example1. example2 none > 0 0 - ============= ============= =========== ===== ======= - """ - - sabs = self.is_absolute() - oabs = other.is_absolute() - if sabs != oabs: - if sabs: - return (NAMERELN_NONE, 1, 0) - else: - return (NAMERELN_NONE, -1, 0) - l1 = len(self.labels) - l2 = len(other.labels) - ldiff = l1 - l2 - if ldiff < 0: - l = l1 - else: - l = l2 - - order = 0 - nlabels = 0 - namereln = NAMERELN_NONE - while l > 0: - l -= 1 - l1 -= 1 - l2 -= 1 - label1 = self.labels[l1].lower() - label2 = other.labels[l2].lower() - if label1 < label2: - order = -1 - if nlabels > 0: - namereln = NAMERELN_COMMONANCESTOR - return (namereln, order, nlabels) - elif label1 > label2: - order = 1 - if nlabels > 0: - namereln = NAMERELN_COMMONANCESTOR - return (namereln, order, nlabels) - nlabels += 1 - order = ldiff - if ldiff < 0: - namereln = NAMERELN_SUPERDOMAIN - elif ldiff > 0: - namereln = NAMERELN_SUBDOMAIN - else: - namereln = NAMERELN_EQUAL - return (namereln, order, nlabels) - - def is_subdomain(self, other): - """Is self a subdomain of other? - - Note that the notion of subdomain includes equality, e.g. - "dnpython.org" is a subdomain of itself. - - Returns a ``bool``. - """ - - (nr, o, nl) = self.fullcompare(other) - if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL: - return True - return False - - def is_superdomain(self, other): - """Is self a superdomain of other? - - Note that the notion of superdomain includes equality, e.g. - "dnpython.org" is a superdomain of itself. - - Returns a ``bool``. 
- """ - - (nr, o, nl) = self.fullcompare(other) - if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL: - return True - return False - - def canonicalize(self): - """Return a name which is equal to the current name, but is in - DNSSEC canonical form. - """ - - return Name([x.lower() for x in self.labels]) - - def __eq__(self, other): - if isinstance(other, Name): - return self.fullcompare(other)[1] == 0 - else: - return False - - def __ne__(self, other): - if isinstance(other, Name): - return self.fullcompare(other)[1] != 0 - else: - return True - - def __lt__(self, other): - if isinstance(other, Name): - return self.fullcompare(other)[1] < 0 - else: - return NotImplemented - - def __le__(self, other): - if isinstance(other, Name): - return self.fullcompare(other)[1] <= 0 - else: - return NotImplemented - - def __ge__(self, other): - if isinstance(other, Name): - return self.fullcompare(other)[1] >= 0 - else: - return NotImplemented - - def __gt__(self, other): - if isinstance(other, Name): - return self.fullcompare(other)[1] > 0 - else: - return NotImplemented - - def __repr__(self): - return '<DNS name ' + self.__str__() + '>' - - def __str__(self): - return self.to_text(False) - - def to_text(self, omit_final_dot=False): - """Convert name to DNS text format. - - *omit_final_dot* is a ``bool``. If True, don't emit the final - dot (denoting the root label) for absolute names. The default - is False. - - Returns a ``text``. - """ - - if len(self.labels) == 0: - return maybe_decode(b'@') - if len(self.labels) == 1 and self.labels[0] == b'': - return maybe_decode(b'.') - if omit_final_dot and self.is_absolute(): - l = self.labels[:-1] - else: - l = self.labels - s = b'.'.join(map(_escapify, l)) - return maybe_decode(s) - - def to_unicode(self, omit_final_dot=False, idna_codec=None): - """Convert name to Unicode text format. - - IDN ACE labels are converted to Unicode. - - *omit_final_dot* is a ``bool``. 
If True, don't emit the final - dot (denoting the root label) for absolute names. The default - is False. - *idna_codec* specifies the IDNA encoder/decoder. If None, the - dns.name.IDNA_2003_Practical encoder/decoder is used. - The IDNA_2003_Practical decoder does - not impose any policy, it just decodes punycode, so if you - don't want checking for compliance, you can use this decoder - for IDNA2008 as well. - - Returns a ``text``. - """ - - if len(self.labels) == 0: - return u'@' - if len(self.labels) == 1 and self.labels[0] == b'': - return u'.' - if omit_final_dot and self.is_absolute(): - l = self.labels[:-1] - else: - l = self.labels - if idna_codec is None: - idna_codec = IDNA_2003_Practical - return u'.'.join([idna_codec.decode(x) for x in l]) - - def to_digestable(self, origin=None): - """Convert name to a format suitable for digesting in hashes. - - The name is canonicalized and converted to uncompressed wire - format. All names in wire format are absolute. If the name - is a relative name, then an origin must be supplied. - - *origin* is a ``dns.name.Name`` or ``None``. If the name is - relative and origin is not ``None``, then origin will be appended - to the name. - - Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is - relative and no origin was provided. - - Returns a ``binary``. - """ - - if not self.is_absolute(): - if origin is None or not origin.is_absolute(): - raise NeedAbsoluteNameOrOrigin - labels = list(self.labels) - labels.extend(list(origin.labels)) - else: - labels = self.labels - dlabels = [struct.pack('!B%ds' % len(x), len(x), x.lower()) - for x in labels] - return b''.join(dlabels) - - def to_wire(self, file=None, compress=None, origin=None): - """Convert name to wire format, possibly compressing it. - - *file* is the file where the name is emitted (typically a - BytesIO file). If ``None`` (the default), a ``binary`` - containing the wire name will be returned. - - *compress*, a ``dict``, is the compression table to use. 
If - ``None`` (the default), names will not be compressed. - - *origin* is a ``dns.name.Name`` or ``None``. If the name is - relative and origin is not ``None``, then *origin* will be appended - to it. - - Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is - relative and no origin was provided. - - Returns a ``binary`` or ``None``. - """ - - if file is None: - file = BytesIO() - want_return = True - else: - want_return = False - - if not self.is_absolute(): - if origin is None or not origin.is_absolute(): - raise NeedAbsoluteNameOrOrigin - labels = list(self.labels) - labels.extend(list(origin.labels)) - else: - labels = self.labels - i = 0 - for label in labels: - n = Name(labels[i:]) - i += 1 - if compress is not None: - pos = compress.get(n) - else: - pos = None - if pos is not None: - value = 0xc000 + pos - s = struct.pack('!H', value) - file.write(s) - break - else: - if compress is not None and len(n) > 1: - pos = file.tell() - if pos <= 0x3fff: - compress[n] = pos - l = len(label) - file.write(struct.pack('!B', l)) - if l > 0: - file.write(label) - if want_return: - return file.getvalue() - - def __len__(self): - """The length of the name (in labels). - - Returns an ``int``. - """ - - return len(self.labels) - - def __getitem__(self, index): - return self.labels[index] - - def __add__(self, other): - return self.concatenate(other) - - def __sub__(self, other): - return self.relativize(other) - - def split(self, depth): - """Split a name into a prefix and suffix names at the specified depth. - - *depth* is an ``int`` specifying the number of labels in the suffix - - Raises ``ValueError`` if *depth* was not >= 0 and <= the length of the - name. - - Returns the tuple ``(prefix, suffix)``. 
- """ - - l = len(self.labels) - if depth == 0: - return (self, dns.name.empty) - elif depth == l: - return (dns.name.empty, self) - elif depth < 0 or depth > l: - raise ValueError( - 'depth must be >= 0 and <= the length of the name') - return (Name(self[: -depth]), Name(self[-depth:])) - - def concatenate(self, other): - """Return a new name which is the concatenation of self and other. - - Raises ``dns.name.AbsoluteConcatenation`` if the name is - absolute and *other* is not the empty name. - - Returns a ``dns.name.Name``. - """ - - if self.is_absolute() and len(other) > 0: - raise AbsoluteConcatenation - labels = list(self.labels) - labels.extend(list(other.labels)) - return Name(labels) - - def relativize(self, origin): - """If the name is a subdomain of *origin*, return a new name which is - the name relative to origin. Otherwise return the name. - - For example, relativizing ``www.dnspython.org.`` to origin - ``dnspython.org.`` returns the name ``www``. Relativizing ``example.`` - to origin ``dnspython.org.`` returns ``example.``. - - Returns a ``dns.name.Name``. - """ - - if origin is not None and self.is_subdomain(origin): - return Name(self[: -len(origin)]) - else: - return self - - def derelativize(self, origin): - """If the name is a relative name, return a new name which is the - concatenation of the name and origin. Otherwise return the name. - - For example, derelativizing ``www`` to origin ``dnspython.org.`` - returns the name ``www.dnspython.org.``. Derelativizing ``example.`` - to origin ``dnspython.org.`` returns ``example.``. - - Returns a ``dns.name.Name``. - """ - - if not self.is_absolute(): - return self.concatenate(origin) - else: - return self - - def choose_relativity(self, origin=None, relativize=True): - """Return a name with the relativity desired by the caller. - - If *origin* is ``None``, then the name is returned. 
- Otherwise, if *relativize* is ``True`` the name is - relativized, and if *relativize* is ``False`` the name is - derelativized. - - Returns a ``dns.name.Name``. - """ - - if origin: - if relativize: - return self.relativize(origin) - else: - return self.derelativize(origin) - else: - return self - - def parent(self): - """Return the parent of the name. - - For example, the parent of ``www.dnspython.org.`` is ``dnspython.org``. - - Raises ``dns.name.NoParent`` if the name is either the root name or the - empty name, and thus has no parent. - - Returns a ``dns.name.Name``. - """ - - if self == root or self == empty: - raise NoParent - return Name(self.labels[1:]) - -#: The root name, '.' -root = Name([b'']) - -#: The empty name. -empty = Name([]) - -def from_unicode(text, origin=root, idna_codec=None): - """Convert unicode text into a Name object. - - Labels are encoded in IDN ACE form according to rules specified by - the IDNA codec. - - *text*, a ``text``, is the text to convert into a name. - - *origin*, a ``dns.name.Name``, specifies the origin to - append to non-absolute names. The default is the root name. - - *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA - encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder - is used. - - Returns a ``dns.name.Name``. - """ - - if not isinstance(text, text_type): - raise ValueError("input to from_unicode() must be a unicode string") - if not (origin is None or isinstance(origin, Name)): - raise ValueError("origin must be a Name or None") - labels = [] - label = u'' - escaping = False - edigits = 0 - total = 0 - if idna_codec is None: - idna_codec = IDNA_2003 - if text == u'@': - text = u'' - if text: - if text == u'.': - return Name([b'']) # no Unicode "u" on this constant! 
- for c in text: - if escaping: - if edigits == 0: - if c.isdigit(): - total = int(c) - edigits += 1 - else: - label += c - escaping = False - else: - if not c.isdigit(): - raise BadEscape - total *= 10 - total += int(c) - edigits += 1 - if edigits == 3: - escaping = False - label += unichr(total) - elif c in [u'.', u'\u3002', u'\uff0e', u'\uff61']: - if len(label) == 0: - raise EmptyLabel - labels.append(idna_codec.encode(label)) - label = u'' - elif c == u'\\': - escaping = True - edigits = 0 - total = 0 - else: - label += c - if escaping: - raise BadEscape - if len(label) > 0: - labels.append(idna_codec.encode(label)) - else: - labels.append(b'') - - if (len(labels) == 0 or labels[-1] != b'') and origin is not None: - labels.extend(list(origin.labels)) - return Name(labels) - - -def from_text(text, origin=root, idna_codec=None): - """Convert text into a Name object. - - *text*, a ``text``, is the text to convert into a name. - - *origin*, a ``dns.name.Name``, specifies the origin to - append to non-absolute names. The default is the root name. - - *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA - encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder - is used. - - Returns a ``dns.name.Name``. 
- """ - - if isinstance(text, text_type): - return from_unicode(text, origin, idna_codec) - if not isinstance(text, binary_type): - raise ValueError("input to from_text() must be a string") - if not (origin is None or isinstance(origin, Name)): - raise ValueError("origin must be a Name or None") - labels = [] - label = b'' - escaping = False - edigits = 0 - total = 0 - if text == b'@': - text = b'' - if text: - if text == b'.': - return Name([b'']) - for c in bytearray(text): - byte_ = struct.pack('!B', c) - if escaping: - if edigits == 0: - if byte_.isdigit(): - total = int(byte_) - edigits += 1 - else: - label += byte_ - escaping = False - else: - if not byte_.isdigit(): - raise BadEscape - total *= 10 - total += int(byte_) - edigits += 1 - if edigits == 3: - escaping = False - label += struct.pack('!B', total) - elif byte_ == b'.': - if len(label) == 0: - raise EmptyLabel - labels.append(label) - label = b'' - elif byte_ == b'\\': - escaping = True - edigits = 0 - total = 0 - else: - label += byte_ - if escaping: - raise BadEscape - if len(label) > 0: - labels.append(label) - else: - labels.append(b'') - if (len(labels) == 0 or labels[-1] != b'') and origin is not None: - labels.extend(list(origin.labels)) - return Name(labels) - - -def from_wire(message, current): - """Convert possibly compressed wire format into a Name. - - *message* is a ``binary`` containing an entire DNS message in DNS - wire form. - - *current*, an ``int``, is the offset of the beginning of the name - from the start of the message - - Raises ``dns.name.BadPointer`` if a compression pointer did not - point backwards in the message. - - Raises ``dns.name.BadLabelType`` if an invalid label type was encountered. - - Returns a ``(dns.name.Name, int)`` tuple consisting of the name - that was read and the number of bytes of the wire format message - which were consumed reading it. 
- """ - - if not isinstance(message, binary_type): - raise ValueError("input to from_wire() must be a byte string") - message = dns.wiredata.maybe_wrap(message) - labels = [] - biggest_pointer = current - hops = 0 - count = message[current] - current += 1 - cused = 1 - while count != 0: - if count < 64: - labels.append(message[current: current + count].unwrap()) - current += count - if hops == 0: - cused += count - elif count >= 192: - current = (count & 0x3f) * 256 + message[current] - if hops == 0: - cused += 1 - if current >= biggest_pointer: - raise BadPointer - biggest_pointer = current - hops += 1 - else: - raise BadLabelType - count = message[current] - current += 1 - if hops == 0: - cused += 1 - labels.append('') - return (Name(labels), cused) diff --git a/lib/dns/name.pyi b/lib/dns/name.pyi deleted file mode 100644 index 5a8061b2..00000000 --- a/lib/dns/name.pyi +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Optional, Union, Tuple, Iterable, List - -class Name: - def is_subdomain(self, o : Name) -> bool: ... - def is_superdomain(self, o : Name) -> bool: ... - def __init__(self, labels : Iterable[Union[bytes,str]]) -> None: - self.labels : List[bytes] - def is_absolute(self) -> bool: ... - def is_wild(self) -> bool: ... - def fullcompare(self, other) -> Tuple[int,int,int]: ... - def canonicalize(self) -> Name: ... - def __lt__(self, other : Name): ... - def __le__(self, other : Name): ... - def __ge__(self, other : Name): ... - def __gt__(self, other : Name): ... - def to_text(self, omit_final_dot=False) -> str: ... - def to_unicode(self, omit_final_dot=False, idna_codec=None) -> str: ... - def to_digestable(self, origin=None) -> bytes: ... - def to_wire(self, file=None, compress=None, origin=None) -> Optional[bytes]: ... - def __add__(self, other : Name): ... - def __sub__(self, other : Name): ... - def split(self, depth) -> List[Tuple[str,str]]: ... - def concatenate(self, other : Name) -> Name: ... - def relativize(self, origin): ... 
- def derelativize(self, origin): ... - def choose_relativity(self, origin : Optional[Name] = None, relativize=True): ... - def parent(self) -> Name: ... - -class IDNACodec: - pass - -def from_text(text, origin : Optional[Name] = Name('.'), idna_codec : Optional[IDNACodec] = None) -> Name: - ... - -empty : Name diff --git a/lib/dns/namedict.py b/lib/dns/namedict.py deleted file mode 100644 index 37a13104..00000000 --- a/lib/dns/namedict.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# Copyright (C) 2016 Coresec Systems AB -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL -# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC -# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, -# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION -# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS name dictionary""" - -import collections -import dns.name -from ._compat import xrange - - -class NameDict(collections.MutableMapping): - """A dictionary whose keys are dns.name.Name objects. - - In addition to being like a regular Python dictionary, this - dictionary can also get the deepest match for a given key. - """ - - __slots__ = ["max_depth", "max_depth_items", "__store"] - - def __init__(self, *args, **kwargs): - super(NameDict, self).__init__() - self.__store = dict() - #: the maximum depth of the keys that have ever been added - self.max_depth = 0 - #: the number of items of maximum depth - self.max_depth_items = 0 - self.update(dict(*args, **kwargs)) - - def __update_max_depth(self, key): - if len(key) == self.max_depth: - self.max_depth_items = self.max_depth_items + 1 - elif len(key) > self.max_depth: - self.max_depth = len(key) - self.max_depth_items = 1 - - def __getitem__(self, key): - return self.__store[key] - - def __setitem__(self, key, value): - if not isinstance(key, dns.name.Name): - raise ValueError('NameDict key must be a name') - self.__store[key] = value - self.__update_max_depth(key) - - def __delitem__(self, key): - value = self.__store.pop(key) - if len(value) == self.max_depth: - self.max_depth_items = self.max_depth_items - 1 - if self.max_depth_items == 0: - self.max_depth = 0 - for k in self.__store: - self.__update_max_depth(k) - - def __iter__(self): - return iter(self.__store) - - def __len__(self): - return len(self.__store) - - def has_key(self, key): - return key in self.__store - - def get_deepest_match(self, name): - """Find the deepest match to *fname* in the dictionary. - - The deepest match is the longest name in the dictionary which is - a superdomain of *name*. Note that *superdomain* includes matching - *name* itself. - - *name*, a ``dns.name.Name``, the name to find. - - Returns a ``(key, value)`` where *key* is the deepest - ``dns.name.Name``, and *value* is the value associated with *key*. 
- """ - - depth = len(name) - if depth > self.max_depth: - depth = self.max_depth - for i in xrange(-depth, 0): - n = dns.name.Name(name[i:]) - if n in self: - return (n, self[n]) - v = self[dns.name.empty] - return (dns.name.empty, v) diff --git a/lib/dns/node.py b/lib/dns/node.py deleted file mode 100644 index 8a7f19f5..00000000 --- a/lib/dns/node.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS nodes. A node is a set of rdatasets.""" - -from io import StringIO - -import dns.rdataset -import dns.rdatatype -import dns.renderer - - -class Node(object): - - """A Node is a set of rdatasets.""" - - __slots__ = ['rdatasets'] - - def __init__(self): - #: the set of rdatsets, represented as a list. - self.rdatasets = [] - - def to_text(self, name, **kw): - """Convert a node to text format. - - Each rdataset at the node is printed. Any keyword arguments - to this method are passed on to the rdataset's to_text() method. - - *name*, a ``dns.name.Name`` or ``text``, the owner name of the rdatasets. - - Returns a ``text``. 
- """ - - s = StringIO() - for rds in self.rdatasets: - if len(rds) > 0: - s.write(rds.to_text(name, **kw)) - s.write(u'\n') - return s.getvalue()[:-1] - - def __repr__(self): - return '<DNS node ' + str(id(self)) + '>' - - def __eq__(self, other): - # - # This is inefficient. Good thing we don't need to do it much. - # - for rd in self.rdatasets: - if rd not in other.rdatasets: - return False - for rd in other.rdatasets: - if rd not in self.rdatasets: - return False - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __len__(self): - return len(self.rdatasets) - - def __iter__(self): - return iter(self.rdatasets) - - def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE, - create=False): - """Find an rdataset matching the specified properties in the - current node. - - *rdclass*, an ``int``, the class of the rdataset. - - *rdtype*, an ``int``, the type of the rdataset. - - *covers*, an ``int``, the covered type. Usually this value is - dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or - dns.rdatatype.RRSIG, then the covers value will be the rdata - type the SIG/RRSIG covers. The library treats the SIG and RRSIG - types as if they were a family of - types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much - easier to work with than if RRSIGs covering different rdata - types were aggregated into a single RRSIG rdataset. - - *create*, a ``bool``. If True, create the rdataset if it is not found. - - Raises ``KeyError`` if an rdataset of the desired type and class does - not exist and *create* is not ``True``. - - Returns a ``dns.rdataset.Rdataset``. 
- """ - - for rds in self.rdatasets: - if rds.match(rdclass, rdtype, covers): - return rds - if not create: - raise KeyError - rds = dns.rdataset.Rdataset(rdclass, rdtype) - self.rdatasets.append(rds) - return rds - - def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE, - create=False): - """Get an rdataset matching the specified properties in the - current node. - - None is returned if an rdataset of the specified type and - class does not exist and *create* is not ``True``. - - *rdclass*, an ``int``, the class of the rdataset. - - *rdtype*, an ``int``, the type of the rdataset. - - *covers*, an ``int``, the covered type. Usually this value is - dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or - dns.rdatatype.RRSIG, then the covers value will be the rdata - type the SIG/RRSIG covers. The library treats the SIG and RRSIG - types as if they were a family of - types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much - easier to work with than if RRSIGs covering different rdata - types were aggregated into a single RRSIG rdataset. - - *create*, a ``bool``. If True, create the rdataset if it is not found. - - Returns a ``dns.rdataset.Rdataset`` or ``None``. - """ - - try: - rds = self.find_rdataset(rdclass, rdtype, covers, create) - except KeyError: - rds = None - return rds - - def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE): - """Delete the rdataset matching the specified properties in the - current node. - - If a matching rdataset does not exist, it is not an error. - - *rdclass*, an ``int``, the class of the rdataset. - - *rdtype*, an ``int``, the type of the rdataset. - - *covers*, an ``int``, the covered type. - """ - - rds = self.get_rdataset(rdclass, rdtype, covers) - if rds is not None: - self.rdatasets.remove(rds) - - def replace_rdataset(self, replacement): - """Replace an rdataset. - - It is not an error if there is no rdataset matching *replacement*. 
- - Ownership of the *replacement* object is transferred to the node; - in other words, this method does not store a copy of *replacement* - at the node, it stores *replacement* itself. - - *replacement*, a ``dns.rdataset.Rdataset``. - - Raises ``ValueError`` if *replacement* is not a - ``dns.rdataset.Rdataset``. - """ - - if not isinstance(replacement, dns.rdataset.Rdataset): - raise ValueError('replacement is not an rdataset') - self.delete_rdataset(replacement.rdclass, replacement.rdtype, - replacement.covers) - self.rdatasets.append(replacement) diff --git a/lib/dns/node.pyi b/lib/dns/node.pyi deleted file mode 100644 index 0997edf9..00000000 --- a/lib/dns/node.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from typing import List, Optional, Union -from . import rdataset, rdatatype, name -class Node: - def __init__(self): - self.rdatasets : List[rdataset.Rdataset] - def to_text(self, name : Union[str,name.Name], **kw) -> str: - ... - def find_rdataset(self, rdclass : int, rdtype : int, covers=rdatatype.NONE, - create=False) -> rdataset.Rdataset: - ... - def get_rdataset(self, rdclass : int, rdtype : int, covers=rdatatype.NONE, - create=False) -> Optional[rdataset.Rdataset]: - ... - def delete_rdataset(self, rdclass : int, rdtype : int, covers=rdatatype.NONE): - ... - def replace_rdataset(self, replacement : rdataset.Rdataset) -> None: - ... diff --git a/lib/dns/opcode.py b/lib/dns/opcode.py deleted file mode 100644 index c0735ba4..00000000 --- a/lib/dns/opcode.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS Opcodes.""" - -import dns.exception - -#: Query -QUERY = 0 -#: Inverse Query (historical) -IQUERY = 1 -#: Server Status (unspecified and unimplemented anywhere) -STATUS = 2 -#: Notify -NOTIFY = 4 -#: Dynamic Update -UPDATE = 5 - -_by_text = { - 'QUERY': QUERY, - 'IQUERY': IQUERY, - 'STATUS': STATUS, - 'NOTIFY': NOTIFY, - 'UPDATE': UPDATE -} - -# We construct the inverse mapping programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mapping not to be true inverse. - -_by_value = {y: x for x, y in _by_text.items()} - - -class UnknownOpcode(dns.exception.DNSException): - """An DNS opcode is unknown.""" - - -def from_text(text): - """Convert text into an opcode. - - *text*, a ``text``, the textual opcode - - Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown. - - Returns an ``int``. - """ - - if text.isdigit(): - value = int(text) - if value >= 0 and value <= 15: - return value - value = _by_text.get(text.upper()) - if value is None: - raise UnknownOpcode - return value - - -def from_flags(flags): - """Extract an opcode from DNS message flags. - - *flags*, an ``int``, the DNS flags. - - Returns an ``int``. - """ - - return (flags & 0x7800) >> 11 - - -def to_flags(value): - """Convert an opcode to a value suitable for ORing into DNS message - flags. - - *value*, an ``int``, the DNS opcode value. - - Returns an ``int``. 
- """ - - return (value << 11) & 0x7800 - - -def to_text(value): - """Convert an opcode to text. - - *value*, an ``int`` the opcode value, - - Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown. - - Returns a ``text``. - """ - - text = _by_value.get(value) - if text is None: - text = str(value) - return text - - -def is_update(flags): - """Is the opcode in flags UPDATE? - - *flags*, an ``int``, the DNS message flags. - - Returns a ``bool``. - """ - - return from_flags(flags) == UPDATE diff --git a/lib/dns/py.typed b/lib/dns/py.typed deleted file mode 100644 index e69de29b..00000000 diff --git a/lib/dns/query.py b/lib/dns/query.py deleted file mode 100644 index c0c517cc..00000000 --- a/lib/dns/query.py +++ /dev/null @@ -1,683 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""Talk to a DNS server.""" - -from __future__ import generators - -import errno -import select -import socket -import struct -import sys -import time - -import dns.exception -import dns.inet -import dns.name -import dns.message -import dns.rcode -import dns.rdataclass -import dns.rdatatype -from ._compat import long, string_types, PY3 - -if PY3: - select_error = OSError -else: - select_error = select.error - -# Function used to create a socket. Can be overridden if needed in special -# situations. -socket_factory = socket.socket - -class UnexpectedSource(dns.exception.DNSException): - """A DNS query response came from an unexpected address or port.""" - - -class BadResponse(dns.exception.FormError): - """A DNS query response does not respond to the question asked.""" - - -class TransferError(dns.exception.DNSException): - """A zone transfer response got a non-zero rcode.""" - - def __init__(self, rcode): - message = 'Zone transfer error: %s' % dns.rcode.to_text(rcode) - super(TransferError, self).__init__(message) - self.rcode = rcode - - -def _compute_expiration(timeout): - if timeout is None: - return None - else: - return time.time() + timeout - -# This module can use either poll() or select() as the "polling backend". -# -# A backend function takes an fd, bools for readability, writablity, and -# error detection, and a timeout. 
- -def _poll_for(fd, readable, writable, error, timeout): - """Poll polling backend.""" - - event_mask = 0 - if readable: - event_mask |= select.POLLIN - if writable: - event_mask |= select.POLLOUT - if error: - event_mask |= select.POLLERR - - pollable = select.poll() - pollable.register(fd, event_mask) - - if timeout: - event_list = pollable.poll(long(timeout * 1000)) - else: - event_list = pollable.poll() - - return bool(event_list) - - -def _select_for(fd, readable, writable, error, timeout): - """Select polling backend.""" - - rset, wset, xset = [], [], [] - - if readable: - rset = [fd] - if writable: - wset = [fd] - if error: - xset = [fd] - - if timeout is None: - (rcount, wcount, xcount) = select.select(rset, wset, xset) - else: - (rcount, wcount, xcount) = select.select(rset, wset, xset, timeout) - - return bool((rcount or wcount or xcount)) - - -def _wait_for(fd, readable, writable, error, expiration): - # Use the selected polling backend to wait for any of the specified - # events. An "expiration" absolute time is converted into a relative - # timeout. - - done = False - while not done: - if expiration is None: - timeout = None - else: - timeout = expiration - time.time() - if timeout <= 0.0: - raise dns.exception.Timeout - try: - if not _polling_backend(fd, readable, writable, error, timeout): - raise dns.exception.Timeout - except select_error as e: - if e.args[0] != errno.EINTR: - raise e - done = True - - -def _set_polling_backend(fn): - # Internal API. Do not use. - - global _polling_backend - - _polling_backend = fn - -if hasattr(select, 'poll'): - # Prefer poll() on platforms that support it because it has no - # limits on the maximum value of a file descriptor (plus it will - # be more efficient for high values). 
- _polling_backend = _poll_for -else: - _polling_backend = _select_for - - -def _wait_for_readable(s, expiration): - _wait_for(s, True, False, True, expiration) - - -def _wait_for_writable(s, expiration): - _wait_for(s, False, True, True, expiration) - - -def _addresses_equal(af, a1, a2): - # Convert the first value of the tuple, which is a textual format - # address into binary form, so that we are not confused by different - # textual representations of the same address - try: - n1 = dns.inet.inet_pton(af, a1[0]) - n2 = dns.inet.inet_pton(af, a2[0]) - except dns.exception.SyntaxError: - return False - return n1 == n2 and a1[1:] == a2[1:] - - -def _destination_and_source(af, where, port, source, source_port): - # Apply defaults and compute destination and source tuples - # suitable for use in connect(), sendto(), or bind(). - if af is None: - try: - af = dns.inet.af_for_address(where) - except Exception: - af = dns.inet.AF_INET - if af == dns.inet.AF_INET: - destination = (where, port) - if source is not None or source_port != 0: - if source is None: - source = '0.0.0.0' - source = (source, source_port) - elif af == dns.inet.AF_INET6: - destination = (where, port, 0, 0) - if source is not None or source_port != 0: - if source is None: - source = '::' - source = (source, source_port, 0, 0) - return (af, destination, source) - - -def send_udp(sock, what, destination, expiration=None): - """Send a DNS message to the specified UDP socket. - - *sock*, a ``socket``. - - *what*, a ``binary`` or ``dns.message.Message``, the message to send. - - *destination*, a destination tuple appropriate for the address family - of the socket, specifying where to send the query. - - *expiration*, a ``float`` or ``None``, the absolute time at which - a timeout exception should be raised. If ``None``, no timeout will - occur. - - Returns an ``(int, float)`` tuple of bytes sent and the sent time. 
- """ - - if isinstance(what, dns.message.Message): - what = what.to_wire() - _wait_for_writable(sock, expiration) - sent_time = time.time() - n = sock.sendto(what, destination) - return (n, sent_time) - - -def receive_udp(sock, destination, expiration=None, - ignore_unexpected=False, one_rr_per_rrset=False, - keyring=None, request_mac=b'', ignore_trailing=False): - """Read a DNS message from a UDP socket. - - *sock*, a ``socket``. - - *destination*, a destination tuple appropriate for the address family - of the socket, specifying where the associated query was sent. - - *expiration*, a ``float`` or ``None``, the absolute time at which - a timeout exception should be raised. If ``None``, no timeout will - occur. - - *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from - unexpected sources. - - *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own - RRset. - - *keyring*, a ``dict``, the keyring to use for TSIG. - - *request_mac*, a ``binary``, the MAC of the request (for TSIG). - - *ignore_trailing*, a ``bool``. If ``True``, ignore trailing - junk at end of the received message. - - Raises if the message is malformed, if network errors occur, of if - there is a timeout. - - Returns a ``dns.message.Message`` object. 
- """ - - wire = b'' - while 1: - _wait_for_readable(sock, expiration) - (wire, from_address) = sock.recvfrom(65535) - if _addresses_equal(sock.family, from_address, destination) or \ - (dns.inet.is_multicast(destination[0]) and - from_address[1:] == destination[1:]): - break - if not ignore_unexpected: - raise UnexpectedSource('got a response from ' - '%s instead of %s' % (from_address, - destination)) - received_time = time.time() - r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac, - one_rr_per_rrset=one_rr_per_rrset, - ignore_trailing=ignore_trailing) - return (r, received_time) - -def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0, - ignore_unexpected=False, one_rr_per_rrset=False, ignore_trailing=False): - """Return the response obtained after sending a query via UDP. - - *q*, a ``dns.message.Message``, the query to send - - *where*, a ``text`` containing an IPv4 or IPv6 address, where - to send the message. - - *timeout*, a ``float`` or ``None``, the number of seconds to wait before the - query times out. If ``None``, the default, wait forever. - - *port*, an ``int``, the port send the message to. The default is 53. - - *af*, an ``int``, the address family to use. The default is ``None``, - which causes the address family to use to be inferred from the form of - *where*. If the inference attempt fails, AF_INET is used. This - parameter is historical; you need never set it. - - *source*, a ``text`` containing an IPv4 or IPv6 address, specifying - the source address. The default is the wildcard address. - - *source_port*, an ``int``, the port from which to send the message. - The default is 0. - - *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from - unexpected sources. - - *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own - RRset. - - *ignore_trailing*, a ``bool``. If ``True``, ignore trailing - junk at end of the received message. - - Returns a ``dns.message.Message``. 
- """ - - wire = q.to_wire() - (af, destination, source) = _destination_and_source(af, where, port, - source, source_port) - s = socket_factory(af, socket.SOCK_DGRAM, 0) - received_time = None - sent_time = None - try: - expiration = _compute_expiration(timeout) - s.setblocking(0) - if source is not None: - s.bind(source) - (_, sent_time) = send_udp(s, wire, destination, expiration) - (r, received_time) = receive_udp(s, destination, expiration, - ignore_unexpected, one_rr_per_rrset, - q.keyring, q.mac, ignore_trailing) - finally: - if sent_time is None or received_time is None: - response_time = 0 - else: - response_time = received_time - sent_time - s.close() - r.time = response_time - if not q.is_response(r): - raise BadResponse - return r - - -def _net_read(sock, count, expiration): - """Read the specified number of bytes from sock. Keep trying until we - either get the desired amount, or we hit EOF. - A Timeout exception will be raised if the operation is not completed - by the expiration time. - """ - s = b'' - while count > 0: - _wait_for_readable(sock, expiration) - n = sock.recv(count) - if n == b'': - raise EOFError - count = count - len(n) - s = s + n - return s - - -def _net_write(sock, data, expiration): - """Write the specified data to the socket. - A Timeout exception will be raised if the operation is not completed - by the expiration time. - """ - current = 0 - l = len(data) - while current < l: - _wait_for_writable(sock, expiration) - current += sock.send(data[current:]) - - -def send_tcp(sock, what, expiration=None): - """Send a DNS message to the specified TCP socket. - - *sock*, a ``socket``. - - *what*, a ``binary`` or ``dns.message.Message``, the message to send. - - *expiration*, a ``float`` or ``None``, the absolute time at which - a timeout exception should be raised. If ``None``, no timeout will - occur. - - Returns an ``(int, float)`` tuple of bytes sent and the sent time. 
- """ - - if isinstance(what, dns.message.Message): - what = what.to_wire() - l = len(what) - # copying the wire into tcpmsg is inefficient, but lets us - # avoid writev() or doing a short write that would get pushed - # onto the net - tcpmsg = struct.pack("!H", l) + what - _wait_for_writable(sock, expiration) - sent_time = time.time() - _net_write(sock, tcpmsg, expiration) - return (len(tcpmsg), sent_time) - -def receive_tcp(sock, expiration=None, one_rr_per_rrset=False, - keyring=None, request_mac=b'', ignore_trailing=False): - """Read a DNS message from a TCP socket. - - *sock*, a ``socket``. - - *expiration*, a ``float`` or ``None``, the absolute time at which - a timeout exception should be raised. If ``None``, no timeout will - occur. - - *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own - RRset. - - *keyring*, a ``dict``, the keyring to use for TSIG. - - *request_mac*, a ``binary``, the MAC of the request (for TSIG). - - *ignore_trailing*, a ``bool``. If ``True``, ignore trailing - junk at end of the received message. - - Raises if the message is malformed, if network errors occur, of if - there is a timeout. - - Returns a ``dns.message.Message`` object. - """ - - ldata = _net_read(sock, 2, expiration) - (l,) = struct.unpack("!H", ldata) - wire = _net_read(sock, l, expiration) - received_time = time.time() - r = dns.message.from_wire(wire, keyring=keyring, request_mac=request_mac, - one_rr_per_rrset=one_rr_per_rrset, - ignore_trailing=ignore_trailing) - return (r, received_time) - -def _connect(s, address): - try: - s.connect(address) - except socket.error: - (ty, v) = sys.exc_info()[:2] - - if hasattr(v, 'errno'): - v_err = v.errno - else: - v_err = v[0] - if v_err not in [errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY]: - raise v - - -def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0, - one_rr_per_rrset=False, ignore_trailing=False): - """Return the response obtained after sending a query via TCP. 
- - *q*, a ``dns.message.Message``, the query to send - - *where*, a ``text`` containing an IPv4 or IPv6 address, where - to send the message. - - *timeout*, a ``float`` or ``None``, the number of seconds to wait before the - query times out. If ``None``, the default, wait forever. - - *port*, an ``int``, the port send the message to. The default is 53. - - *af*, an ``int``, the address family to use. The default is ``None``, - which causes the address family to use to be inferred from the form of - *where*. If the inference attempt fails, AF_INET is used. This - parameter is historical; you need never set it. - - *source*, a ``text`` containing an IPv4 or IPv6 address, specifying - the source address. The default is the wildcard address. - - *source_port*, an ``int``, the port from which to send the message. - The default is 0. - - *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own - RRset. - - *ignore_trailing*, a ``bool``. If ``True``, ignore trailing - junk at end of the received message. - - Returns a ``dns.message.Message``. 
- """ - - wire = q.to_wire() - (af, destination, source) = _destination_and_source(af, where, port, - source, source_port) - s = socket_factory(af, socket.SOCK_STREAM, 0) - begin_time = None - received_time = None - try: - expiration = _compute_expiration(timeout) - s.setblocking(0) - begin_time = time.time() - if source is not None: - s.bind(source) - _connect(s, destination) - send_tcp(s, wire, expiration) - (r, received_time) = receive_tcp(s, expiration, one_rr_per_rrset, - q.keyring, q.mac, ignore_trailing) - finally: - if begin_time is None or received_time is None: - response_time = 0 - else: - response_time = received_time - begin_time - s.close() - r.time = response_time - if not q.is_response(r): - raise BadResponse - return r - - -def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN, - timeout=None, port=53, keyring=None, keyname=None, relativize=True, - af=None, lifetime=None, source=None, source_port=0, serial=0, - use_udp=False, keyalgorithm=dns.tsig.default_algorithm): - """Return a generator for the responses to a zone transfer. - - *where*. If the inference attempt fails, AF_INET is used. This - parameter is historical; you need never set it. - - *zone*, a ``dns.name.Name`` or ``text``, the name of the zone to transfer. - - *rdtype*, an ``int`` or ``text``, the type of zone transfer. The - default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be - used to do an incremental transfer instead. - - *rdclass*, an ``int`` or ``text``, the class of the zone transfer. - The default is ``dns.rdataclass.IN``. - - *timeout*, a ``float``, the number of seconds to wait for each - response message. If None, the default, wait forever. - - *port*, an ``int``, the port send the message to. The default is 53. - - *keyring*, a ``dict``, the keyring to use for TSIG. - - *keyname*, a ``dns.name.Name`` or ``text``, the name of the TSIG - key to use. - - *relativize*, a ``bool``. 
If ``True``, all names in the zone will be - relativized to the zone origin. It is essential that the - relativize setting matches the one specified to - ``dns.zone.from_xfr()`` if using this generator to make a zone. - - *af*, an ``int``, the address family to use. The default is ``None``, - which causes the address family to use to be inferred from the form of - *where*. If the inference attempt fails, AF_INET is used. This - parameter is historical; you need never set it. - - *lifetime*, a ``float``, the total number of seconds to spend - doing the transfer. If ``None``, the default, then there is no - limit on the time the transfer may take. - - *source*, a ``text`` containing an IPv4 or IPv6 address, specifying - the source address. The default is the wildcard address. - - *source_port*, an ``int``, the port from which to send the message. - The default is 0. - - *serial*, an ``int``, the SOA serial number to use as the base for - an IXFR diff sequence (only meaningful if *rdtype* is - ``dns.rdatatype.IXFR``). - - *use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR). - - *keyalgorithm*, a ``dns.name.Name`` or ``text``, the TSIG algorithm to use. - - Raises on errors, and so does the generator. - - Returns a generator of ``dns.message.Message`` objects. - """ - - if isinstance(zone, string_types): - zone = dns.name.from_text(zone) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - q = dns.message.make_query(zone, rdtype, rdclass) - if rdtype == dns.rdatatype.IXFR: - rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA', - '. . 
%u 0 0 0 0' % serial) - q.authority.append(rrset) - if keyring is not None: - q.use_tsig(keyring, keyname, algorithm=keyalgorithm) - wire = q.to_wire() - (af, destination, source) = _destination_and_source(af, where, port, - source, source_port) - if use_udp: - if rdtype != dns.rdatatype.IXFR: - raise ValueError('cannot do a UDP AXFR') - s = socket_factory(af, socket.SOCK_DGRAM, 0) - else: - s = socket_factory(af, socket.SOCK_STREAM, 0) - s.setblocking(0) - if source is not None: - s.bind(source) - expiration = _compute_expiration(lifetime) - _connect(s, destination) - l = len(wire) - if use_udp: - _wait_for_writable(s, expiration) - s.send(wire) - else: - tcpmsg = struct.pack("!H", l) + wire - _net_write(s, tcpmsg, expiration) - done = False - delete_mode = True - expecting_SOA = False - soa_rrset = None - if relativize: - origin = zone - oname = dns.name.empty - else: - origin = None - oname = zone - tsig_ctx = None - first = True - while not done: - mexpiration = _compute_expiration(timeout) - if mexpiration is None or mexpiration > expiration: - mexpiration = expiration - if use_udp: - _wait_for_readable(s, expiration) - (wire, from_address) = s.recvfrom(65535) - else: - ldata = _net_read(s, 2, mexpiration) - (l,) = struct.unpack("!H", ldata) - wire = _net_read(s, l, mexpiration) - is_ixfr = (rdtype == dns.rdatatype.IXFR) - r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac, - xfr=True, origin=origin, tsig_ctx=tsig_ctx, - multi=True, first=first, - one_rr_per_rrset=is_ixfr) - rcode = r.rcode() - if rcode != dns.rcode.NOERROR: - raise TransferError(rcode) - tsig_ctx = r.tsig_ctx - first = False - answer_index = 0 - if soa_rrset is None: - if not r.answer or r.answer[0].name != oname: - raise dns.exception.FormError( - "No answer or RRset not for qname") - rrset = r.answer[0] - if rrset.rdtype != dns.rdatatype.SOA: - raise dns.exception.FormError("first RRset is not an SOA") - answer_index = 1 - soa_rrset = rrset.copy() - if rdtype == 
dns.rdatatype.IXFR: - if soa_rrset[0].serial <= serial: - # - # We're already up-to-date. - # - done = True - else: - expecting_SOA = True - # - # Process SOAs in the answer section (other than the initial - # SOA in the first message). - # - for rrset in r.answer[answer_index:]: - if done: - raise dns.exception.FormError("answers after final SOA") - if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname: - if expecting_SOA: - if rrset[0].serial != serial: - raise dns.exception.FormError( - "IXFR base serial mismatch") - expecting_SOA = False - elif rdtype == dns.rdatatype.IXFR: - delete_mode = not delete_mode - # - # If this SOA RRset is equal to the first we saw then we're - # finished. If this is an IXFR we also check that we're seeing - # the record in the expected part of the response. - # - if rrset == soa_rrset and \ - (rdtype == dns.rdatatype.AXFR or - (rdtype == dns.rdatatype.IXFR and delete_mode)): - done = True - elif expecting_SOA: - # - # We made an IXFR request and are expecting another - # SOA RR, but saw something else, so this must be an - # AXFR response. - # - rdtype = dns.rdatatype.AXFR - expecting_SOA = False - if done and q.keyring and not r.had_tsig: - raise dns.exception.FormError("missing TSIG") - yield r - s.close() diff --git a/lib/dns/query.pyi b/lib/dns/query.pyi deleted file mode 100644 index fe5ef826..00000000 --- a/lib/dns/query.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Optional, Union, Dict, Generator, Any -from . 
import message, tsig, rdatatype, rdataclass, name, message -def tcp(q : message.Message, where : str, timeout : float = None, port=53, af : Optional[int] = None, source : Optional[str] = None, source_port : int = 0, - one_rr_per_rrset=False) -> message.Message: - pass - -def xfr(where : None, zone : Union[name.Name,str], rdtype=rdatatype.AXFR, rdclass=rdataclass.IN, - timeout : Optional[float] =None, port=53, keyring : Optional[Dict[name.Name, bytes]] =None, keyname : Union[str,name.Name]=None, relativize=True, - af : Optional[int] =None, lifetime : Optional[float]=None, source : Optional[str] =None, source_port=0, serial=0, - use_udp=False, keyalgorithm=tsig.default_algorithm) -> Generator[Any,Any,message.Message]: - pass - -def udp(q : message.Message, where : str, timeout : Optional[float] = None, port=53, af : Optional[int] = None, source : Optional[str] = None, source_port=0, - ignore_unexpected=False, one_rr_per_rrset=False) -> message.Message: - ... diff --git a/lib/dns/rcode.py b/lib/dns/rcode.py deleted file mode 100644 index 5191e1b1..00000000 --- a/lib/dns/rcode.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS Result Codes.""" - -import dns.exception -from ._compat import long - -#: No error -NOERROR = 0 -#: Form error -FORMERR = 1 -#: Server failure -SERVFAIL = 2 -#: Name does not exist ("Name Error" in RFC 1025 terminology). -NXDOMAIN = 3 -#: Not implemented -NOTIMP = 4 -#: Refused -REFUSED = 5 -#: Name exists. -YXDOMAIN = 6 -#: RRset exists. -YXRRSET = 7 -#: RRset does not exist. -NXRRSET = 8 -#: Not authoritative. -NOTAUTH = 9 -#: Name not in zone. -NOTZONE = 10 -#: Bad EDNS version. -BADVERS = 16 - -_by_text = { - 'NOERROR': NOERROR, - 'FORMERR': FORMERR, - 'SERVFAIL': SERVFAIL, - 'NXDOMAIN': NXDOMAIN, - 'NOTIMP': NOTIMP, - 'REFUSED': REFUSED, - 'YXDOMAIN': YXDOMAIN, - 'YXRRSET': YXRRSET, - 'NXRRSET': NXRRSET, - 'NOTAUTH': NOTAUTH, - 'NOTZONE': NOTZONE, - 'BADVERS': BADVERS -} - -# We construct the inverse mapping programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mapping not to be a true inverse. - -_by_value = {y: x for x, y in _by_text.items()} - - -class UnknownRcode(dns.exception.DNSException): - """A DNS rcode is unknown.""" - - -def from_text(text): - """Convert text into an rcode. - - *text*, a ``text``, the textual rcode or an integer in textual form. - - Raises ``dns.rcode.UnknownRcode`` if the rcode mnemonic is unknown. - - Returns an ``int``. - """ - - if text.isdigit(): - v = int(text) - if v >= 0 and v <= 4095: - return v - v = _by_text.get(text.upper()) - if v is None: - raise UnknownRcode - return v - - -def from_flags(flags, ednsflags): - """Return the rcode value encoded by flags and ednsflags. - - *flags*, an ``int``, the DNS flags field. - - *ednsflags*, an ``int``, the EDNS flags field. - - Raises ``ValueError`` if rcode is < 0 or > 4095 - - Returns an ``int``. 
- """ - - value = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0) - if value < 0 or value > 4095: - raise ValueError('rcode must be >= 0 and <= 4095') - return value - - -def to_flags(value): - """Return a (flags, ednsflags) tuple which encodes the rcode. - - *value*, an ``int``, the rcode. - - Raises ``ValueError`` if rcode is < 0 or > 4095. - - Returns an ``(int, int)`` tuple. - """ - - if value < 0 or value > 4095: - raise ValueError('rcode must be >= 0 and <= 4095') - v = value & 0xf - ev = long(value & 0xff0) << 20 - return (v, ev) - - -def to_text(value): - """Convert rcode into text. - - *value*, and ``int``, the rcode. - - Raises ``ValueError`` if rcode is < 0 or > 4095. - - Returns a ``text``. - """ - - if value < 0 or value > 4095: - raise ValueError('rcode must be >= 0 and <= 4095') - text = _by_value.get(value) - if text is None: - text = str(value) - return text diff --git a/lib/dns/rdata.py b/lib/dns/rdata.py deleted file mode 100644 index ea1971dc..00000000 --- a/lib/dns/rdata.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS rdata.""" - -from io import BytesIO -import base64 -import binascii - -import dns.exception -import dns.name -import dns.rdataclass -import dns.rdatatype -import dns.tokenizer -import dns.wiredata -from ._compat import xrange, string_types, text_type - -try: - import threading as _threading -except ImportError: - import dummy_threading as _threading - -_hex_chunksize = 32 - - -def _hexify(data, chunksize=_hex_chunksize): - """Convert a binary string into its hex encoding, broken up into chunks - of chunksize characters separated by a space. - """ - - line = binascii.hexlify(data) - return b' '.join([line[i:i + chunksize] - for i - in range(0, len(line), chunksize)]).decode() - -_base64_chunksize = 32 - - -def _base64ify(data, chunksize=_base64_chunksize): - """Convert a binary string into its base64 encoding, broken up into chunks - of chunksize characters separated by a space. - """ - - line = base64.b64encode(data) - return b' '.join([line[i:i + chunksize] - for i - in range(0, len(line), chunksize)]).decode() - -__escaped = bytearray(b'"\\') - -def _escapify(qstring): - """Escape the characters in a quoted string which need it.""" - - if isinstance(qstring, text_type): - qstring = qstring.encode() - if not isinstance(qstring, bytearray): - qstring = bytearray(qstring) - - text = '' - for c in qstring: - if c in __escaped: - text += '\\' + chr(c) - elif c >= 0x20 and c < 0x7F: - text += chr(c) - else: - text += '\\%03d' % c - return text - - -def _truncate_bitmap(what): - """Determine the index of greatest byte that isn't all zeros, and - return the bitmap that contains all the bytes less than that index. - """ - - for i in xrange(len(what) - 1, -1, -1): - if what[i] != 0: - return what[0: i + 1] - return what[0:1] - - -class Rdata(object): - """Base class for all DNS rdata types.""" - - __slots__ = ['rdclass', 'rdtype'] - - def __init__(self, rdclass, rdtype): - """Initialize an rdata. - - *rdclass*, an ``int`` is the rdataclass of the Rdata. 
- *rdtype*, an ``int`` is the rdatatype of the Rdata. - """ - - self.rdclass = rdclass - self.rdtype = rdtype - - def covers(self): - """Return the type a Rdata covers. - - DNS SIG/RRSIG rdatas apply to a specific type; this type is - returned by the covers() function. If the rdata type is not - SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when - creating rdatasets, allowing the rdataset to contain only RRSIGs - of a particular type, e.g. RRSIG(NS). - - Returns an ``int``. - """ - - return dns.rdatatype.NONE - - def extended_rdatatype(self): - """Return a 32-bit type value, the least significant 16 bits of - which are the ordinary DNS type, and the upper 16 bits of which are - the "covered" type, if any. - - Returns an ``int``. - """ - - return self.covers() << 16 | self.rdtype - - def to_text(self, origin=None, relativize=True, **kw): - """Convert an rdata to text format. - - Returns a ``text``. - """ - - raise NotImplementedError - - def to_wire(self, file, compress=None, origin=None): - """Convert an rdata to wire format. - - Returns a ``binary``. - """ - - raise NotImplementedError - - def to_digestable(self, origin=None): - """Convert rdata to a format suitable for digesting in hashes. This - is also the DNSSEC canonical form. - - Returns a ``binary``. - """ - - f = BytesIO() - self.to_wire(f, None, origin) - return f.getvalue() - - def validate(self): - """Check that the current contents of the rdata's fields are - valid. - - If you change an rdata by assigning to its fields, - it is a good idea to call validate() when you are done making - changes. - - Raises various exceptions if there are problems. - - Returns ``None``. 
- """ - - dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text()) - - def __repr__(self): - covers = self.covers() - if covers == dns.rdatatype.NONE: - ctext = '' - else: - ctext = '(' + dns.rdatatype.to_text(covers) + ')' - return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ - dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \ - str(self) + '>' - - def __str__(self): - return self.to_text() - - def _cmp(self, other): - """Compare an rdata with another rdata of the same rdtype and - rdclass. - - Return < 0 if self < other in the DNSSEC ordering, 0 if self - == other, and > 0 if self > other. - - """ - our = self.to_digestable(dns.name.root) - their = other.to_digestable(dns.name.root) - if our == their: - return 0 - elif our > their: - return 1 - else: - return -1 - - def __eq__(self, other): - if not isinstance(other, Rdata): - return False - if self.rdclass != other.rdclass or self.rdtype != other.rdtype: - return False - return self._cmp(other) == 0 - - def __ne__(self, other): - if not isinstance(other, Rdata): - return True - if self.rdclass != other.rdclass or self.rdtype != other.rdtype: - return True - return self._cmp(other) != 0 - - def __lt__(self, other): - if not isinstance(other, Rdata) or \ - self.rdclass != other.rdclass or self.rdtype != other.rdtype: - - return NotImplemented - return self._cmp(other) < 0 - - def __le__(self, other): - if not isinstance(other, Rdata) or \ - self.rdclass != other.rdclass or self.rdtype != other.rdtype: - return NotImplemented - return self._cmp(other) <= 0 - - def __ge__(self, other): - if not isinstance(other, Rdata) or \ - self.rdclass != other.rdclass or self.rdtype != other.rdtype: - return NotImplemented - return self._cmp(other) >= 0 - - def __gt__(self, other): - if not isinstance(other, Rdata) or \ - self.rdclass != other.rdclass or self.rdtype != other.rdtype: - return NotImplemented - return self._cmp(other) > 0 - - def __hash__(self): - return 
hash(self.to_digestable(dns.name.root)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - raise NotImplementedError - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - raise NotImplementedError - - def choose_relativity(self, origin=None, relativize=True): - """Convert any domain names in the rdata to the specified - relativization. - """ - -class GenericRdata(Rdata): - - """Generic Rdata Class - - This class is used for rdata types for which we have no better - implementation. It implements the DNS "unknown RRs" scheme. - """ - - __slots__ = ['data'] - - def __init__(self, rdclass, rdtype, data): - super(GenericRdata, self).__init__(rdclass, rdtype) - self.data = data - - def to_text(self, origin=None, relativize=True, **kw): - return r'\# %d ' % len(self.data) + _hexify(self.data) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - token = tok.get() - if not token.is_identifier() or token.value != r'\#': - raise dns.exception.SyntaxError( - r'generic rdata does not start with \#') - length = tok.get_int() - chunks = [] - while 1: - token = tok.get() - if token.is_eol_or_eof(): - break - chunks.append(token.value.encode()) - hex = b''.join(chunks) - data = binascii.unhexlify(hex) - if len(data) != length: - raise dns.exception.SyntaxError( - 'generic rdata hex data has wrong length') - return cls(rdclass, rdtype, data) - - def to_wire(self, file, compress=None, origin=None): - file.write(self.data) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - return cls(rdclass, rdtype, wire[current: current + rdlen]) - -_rdata_modules = {} -_module_prefix = 'dns.rdtypes' -_import_lock = _threading.Lock() - -def get_rdata_class(rdclass, rdtype): - - def import_module(name): - with _import_lock: - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - 
return mod - - mod = _rdata_modules.get((rdclass, rdtype)) - rdclass_text = dns.rdataclass.to_text(rdclass) - rdtype_text = dns.rdatatype.to_text(rdtype) - rdtype_text = rdtype_text.replace('-', '_') - if not mod: - mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype)) - if not mod: - try: - mod = import_module('.'.join([_module_prefix, - rdclass_text, rdtype_text])) - _rdata_modules[(rdclass, rdtype)] = mod - except ImportError: - try: - mod = import_module('.'.join([_module_prefix, - 'ANY', rdtype_text])) - _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod - except ImportError: - mod = None - if mod: - cls = getattr(mod, rdtype_text) - else: - cls = GenericRdata - return cls - - -def from_text(rdclass, rdtype, tok, origin=None, relativize=True): - """Build an rdata object from text format. - - This function attempts to dynamically load a class which - implements the specified rdata class and type. If there is no - class-and-type-specific implementation, the GenericRdata class - is used. - - Once a class is chosen, its from_text() class method is called - with the parameters to this function. - - If *tok* is a ``text``, then a tokenizer is created and the string - is used as its input. - - *rdclass*, an ``int``, the rdataclass. - - *rdtype*, an ``int``, the rdatatype. - - *tok*, a ``dns.tokenizer.Tokenizer`` or a ``text``. - - *origin*, a ``dns.name.Name`` (or ``None``), the - origin to use for relative names. - - *relativize*, a ``bool``. If true, name will be relativized to - the specified origin. - - Returns an instance of the chosen Rdata subclass. - """ - - if isinstance(tok, string_types): - tok = dns.tokenizer.Tokenizer(tok) - cls = get_rdata_class(rdclass, rdtype) - if cls != GenericRdata: - # peek at first token - token = tok.get() - tok.unget(token) - if token.is_identifier() and \ - token.value == r'\#': - # - # Known type using the generic syntax. Extract the - # wire form from the generic syntax, and then run - # from_wire on it. 
- # - rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin, - relativize) - return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data), - origin) - return cls.from_text(rdclass, rdtype, tok, origin, relativize) - - -def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None): - """Build an rdata object from wire format - - This function attempts to dynamically load a class which - implements the specified rdata class and type. If there is no - class-and-type-specific implementation, the GenericRdata class - is used. - - Once a class is chosen, its from_wire() class method is called - with the parameters to this function. - - *rdclass*, an ``int``, the rdataclass. - - *rdtype*, an ``int``, the rdatatype. - - *wire*, a ``binary``, the wire-format message. - - *current*, an ``int``, the offset in wire of the beginning of - the rdata. - - *rdlen*, an ``int``, the length of the wire-format rdata - - *origin*, a ``dns.name.Name`` (or ``None``). If not ``None``, - then names will be relativized to this origin. - - Returns an instance of the chosen Rdata subclass. - """ - - wire = dns.wiredata.maybe_wrap(wire) - cls = get_rdata_class(rdclass, rdtype) - return cls.from_wire(rdclass, rdtype, wire, current, rdlen, origin) - - -class RdatatypeExists(dns.exception.DNSException): - """DNS rdatatype already exists.""" - supp_kwargs = {'rdclass', 'rdtype'} - fmt = "The rdata type with class {rdclass} and rdtype {rdtype} " + \ - "already exists." - - -def register_type(implementation, rdtype, rdtype_text, is_singleton=False, - rdclass=dns.rdataclass.IN): - """Dynamically register a module to handle an rdatatype. - - *implementation*, a module implementing the type in the usual dnspython - way. - - *rdtype*, an ``int``, the rdatatype to register. - - *rdtype_text*, a ``text``, the textual form of the rdatatype. - - *is_singleton*, a ``bool``, indicating if the type is a singleton (i.e. - RRsets of the type can have only one member.) 
- - *rdclass*, the rdataclass of the type, or ``dns.rdataclass.ANY`` if - it applies to all classes. - """ - - existing_cls = get_rdata_class(rdclass, rdtype) - if existing_cls != GenericRdata: - raise RdatatypeExists(rdclass=rdclass, rdtype=rdtype) - _rdata_modules[(rdclass, rdtype)] = implementation - dns.rdatatype.register_type(rdtype, rdtype_text, is_singleton) diff --git a/lib/dns/rdata.pyi b/lib/dns/rdata.pyi deleted file mode 100644 index 8663955c..00000000 --- a/lib/dns/rdata.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Dict, Tuple, Any, Optional -from .name import Name -class Rdata: - def __init__(self): - self.address : str - def to_wire(self, file, compress : Optional[Dict[Name,int]], origin : Optional[Name]) -> bytes: - ... - @classmethod - def from_text(cls, rdclass : int, rdtype : int, tok, origin=None, relativize=True): - ... -_rdata_modules : Dict[Tuple[Any,Rdata],Any] - -def from_text(rdclass : int, rdtype : int, tok : Optional[str], origin : Optional[Name] = None, relativize : bool = True): - ... - -def from_wire(rdclass : int, rdtype : int, wire : bytes, current : int, rdlen : int, origin : Optional[Name] = None): - ... diff --git a/lib/dns/rdataclass.py b/lib/dns/rdataclass.py deleted file mode 100644 index b88aa85b..00000000 --- a/lib/dns/rdataclass.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS Rdata Classes.""" - -import re - -import dns.exception - -RESERVED0 = 0 -IN = 1 -CH = 3 -HS = 4 -NONE = 254 -ANY = 255 - -_by_text = { - 'RESERVED0': RESERVED0, - 'IN': IN, - 'CH': CH, - 'HS': HS, - 'NONE': NONE, - 'ANY': ANY -} - -# We construct the inverse mapping programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mapping not to be true inverse. - -_by_value = {y: x for x, y in _by_text.items()} - -# Now that we've built the inverse map, we can add class aliases to -# the _by_text mapping. - -_by_text.update({ - 'INTERNET': IN, - 'CHAOS': CH, - 'HESIOD': HS -}) - -_metaclasses = { - NONE: True, - ANY: True -} - -_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I) - - -class UnknownRdataclass(dns.exception.DNSException): - """A DNS class is unknown.""" - - -def from_text(text): - """Convert text into a DNS rdata class value. - - The input text can be a defined DNS RR class mnemonic or - instance of the DNS generic class syntax. - - For example, "IN" and "CLASS1" will both result in a value of 1. - - Raises ``dns.rdatatype.UnknownRdataclass`` if the class is unknown. - - Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535. - - Returns an ``int``. - """ - - value = _by_text.get(text.upper()) - if value is None: - match = _unknown_class_pattern.match(text) - if match is None: - raise UnknownRdataclass - value = int(match.group(1)) - if value < 0 or value > 65535: - raise ValueError("class must be between >= 0 and <= 65535") - return value - - -def to_text(value): - """Convert a DNS rdata type value to text. 
- - If the value has a known mnemonic, it will be used, otherwise the - DNS generic class syntax will be used. - - Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535. - - Returns a ``str``. - """ - - if value < 0 or value > 65535: - raise ValueError("class must be between >= 0 and <= 65535") - text = _by_value.get(value) - if text is None: - text = 'CLASS' + repr(value) - return text - - -def is_metaclass(rdclass): - """True if the specified class is a metaclass. - - The currently defined metaclasses are ANY and NONE. - - *rdclass* is an ``int``. - """ - - if rdclass in _metaclasses: - return True - return False diff --git a/lib/dns/rdataset.py b/lib/dns/rdataset.py deleted file mode 100644 index f1afe241..00000000 --- a/lib/dns/rdataset.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" - -import random -from io import StringIO -import struct - -import dns.exception -import dns.rdatatype -import dns.rdataclass -import dns.rdata -import dns.set -from ._compat import string_types - -# define SimpleSet here for backwards compatibility -SimpleSet = dns.set.Set - - -class DifferingCovers(dns.exception.DNSException): - """An attempt was made to add a DNS SIG/RRSIG whose covered type - is not the same as that of the other rdatas in the rdataset.""" - - -class IncompatibleTypes(dns.exception.DNSException): - """An attempt was made to add DNS RR data of an incompatible type.""" - - -class Rdataset(dns.set.Set): - - """A DNS rdataset.""" - - __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl'] - - def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE, ttl=0): - """Create a new rdataset of the specified class and type. - - *rdclass*, an ``int``, the rdataclass. - - *rdtype*, an ``int``, the rdatatype. - - *covers*, an ``int``, the covered rdatatype. - - *ttl*, an ``int``, the TTL. - """ - - super(Rdataset, self).__init__() - self.rdclass = rdclass - self.rdtype = rdtype - self.covers = covers - self.ttl = ttl - - def _clone(self): - obj = super(Rdataset, self)._clone() - obj.rdclass = self.rdclass - obj.rdtype = self.rdtype - obj.covers = self.covers - obj.ttl = self.ttl - return obj - - def update_ttl(self, ttl): - """Perform TTL minimization. - - Set the TTL of the rdataset to be the lesser of the set's current - TTL or the specified TTL. If the set contains no rdatas, set the TTL - to the specified TTL. - - *ttl*, an ``int``. - """ - - if len(self) == 0: - self.ttl = ttl - elif ttl < self.ttl: - self.ttl = ttl - - def add(self, rd, ttl=None): - """Add the specified rdata to the rdataset. - - If the optional *ttl* parameter is supplied, then - ``self.update_ttl(ttl)`` will be called prior to adding the rdata. 
- - *rd*, a ``dns.rdata.Rdata``, the rdata - - *ttl*, an ``int``, the TTL. - - Raises ``dns.rdataset.IncompatibleTypes`` if the type and class - do not match the type and class of the rdataset. - - Raises ``dns.rdataset.DifferingCovers`` if the type is a signature - type and the covered type does not match that of the rdataset. - """ - - # - # If we're adding a signature, do some special handling to - # check that the signature covers the same type as the - # other rdatas in this rdataset. If this is the first rdata - # in the set, initialize the covers field. - # - if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: - raise IncompatibleTypes - if ttl is not None: - self.update_ttl(ttl) - if self.rdtype == dns.rdatatype.RRSIG or \ - self.rdtype == dns.rdatatype.SIG: - covers = rd.covers() - if len(self) == 0 and self.covers == dns.rdatatype.NONE: - self.covers = covers - elif self.covers != covers: - raise DifferingCovers - if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: - self.clear() - super(Rdataset, self).add(rd) - - def union_update(self, other): - self.update_ttl(other.ttl) - super(Rdataset, self).union_update(other) - - def intersection_update(self, other): - self.update_ttl(other.ttl) - super(Rdataset, self).intersection_update(other) - - def update(self, other): - """Add all rdatas in other to self. - - *other*, a ``dns.rdataset.Rdataset``, the rdataset from which - to update. 
- """ - - self.update_ttl(other.ttl) - super(Rdataset, self).update(other) - - def __repr__(self): - if self.covers == 0: - ctext = '' - else: - ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' - return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ - dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>' - - def __str__(self): - return self.to_text() - - def __eq__(self, other): - if not isinstance(other, Rdataset): - return False - if self.rdclass != other.rdclass or \ - self.rdtype != other.rdtype or \ - self.covers != other.covers: - return False - return super(Rdataset, self).__eq__(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_text(self, name=None, origin=None, relativize=True, - override_rdclass=None, **kw): - """Convert the rdataset into DNS master file format. - - See ``dns.name.Name.choose_relativity`` for more information - on how *origin* and *relativize* determine the way names - are emitted. - - Any additional keyword arguments are passed on to the rdata - ``to_text()`` method. - - *name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with - *name* as the owner name. - - *origin*, a ``dns.name.Name`` or ``None``, the origin for relative - names. - - *relativize*, a ``bool``. If ``True``, names will be relativized - to *origin*. - """ - - if name is not None: - name = name.choose_relativity(origin, relativize) - ntext = str(name) - pad = ' ' - else: - ntext = '' - pad = '' - s = StringIO() - if override_rdclass is not None: - rdclass = override_rdclass - else: - rdclass = self.rdclass - if len(self) == 0: - # - # Empty rdatasets are used for the question section, and in - # some dynamic updates, so we don't need to print out the TTL - # (which is meaningless anyway). 
- # - s.write(u'{}{}{} {}\n'.format(ntext, pad, - dns.rdataclass.to_text(rdclass), - dns.rdatatype.to_text(self.rdtype))) - else: - for rd in self: - s.write(u'%s%s%d %s %s %s\n' % - (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), - dns.rdatatype.to_text(self.rdtype), - rd.to_text(origin=origin, relativize=relativize, - **kw))) - # - # We strip off the final \n for the caller's convenience in printing - # - return s.getvalue()[:-1] - - def to_wire(self, name, file, compress=None, origin=None, - override_rdclass=None, want_shuffle=True): - """Convert the rdataset to wire format. - - *name*, a ``dns.name.Name`` is the owner name to use. - - *file* is the file where the name is emitted (typically a - BytesIO file). - - *compress*, a ``dict``, is the compression table to use. If - ``None`` (the default), names will not be compressed. - - *origin* is a ``dns.name.Name`` or ``None``. If the name is - relative and origin is not ``None``, then *origin* will be appended - to it. - - *override_rdclass*, an ``int``, is used as the class instead of the - class of the rdataset. This is useful when rendering rdatasets - associated with dynamic updates. - - *want_shuffle*, a ``bool``. If ``True``, then the order of the - Rdatas within the Rdataset will be shuffled before rendering. - - Returns an ``int``, the number of records emitted. 
- """ - - if override_rdclass is not None: - rdclass = override_rdclass - want_shuffle = False - else: - rdclass = self.rdclass - file.seek(0, 2) - if len(self) == 0: - name.to_wire(file, compress, origin) - stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0) - file.write(stuff) - return 1 - else: - if want_shuffle: - l = list(self) - random.shuffle(l) - else: - l = self - for rd in l: - name.to_wire(file, compress, origin) - stuff = struct.pack("!HHIH", self.rdtype, rdclass, - self.ttl, 0) - file.write(stuff) - start = file.tell() - rd.to_wire(file, compress, origin) - end = file.tell() - assert end - start < 65536 - file.seek(start - 2) - stuff = struct.pack("!H", end - start) - file.write(stuff) - file.seek(0, 2) - return len(self) - - def match(self, rdclass, rdtype, covers): - """Returns ``True`` if this rdataset matches the specified class, - type, and covers. - """ - if self.rdclass == rdclass and \ - self.rdtype == rdtype and \ - self.covers == covers: - return True - return False - - -def from_text_list(rdclass, rdtype, ttl, text_rdatas): - """Create an rdataset with the specified class, type, and TTL, and with - the specified list of rdatas in text format. - - Returns a ``dns.rdataset.Rdataset`` object. - """ - - if isinstance(rdclass, string_types): - rdclass = dns.rdataclass.from_text(rdclass) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - r = Rdataset(rdclass, rdtype) - r.update_ttl(ttl) - for t in text_rdatas: - rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) - r.add(rd) - return r - - -def from_text(rdclass, rdtype, ttl, *text_rdatas): - """Create an rdataset with the specified class, type, and TTL, and with - the specified rdatas in text format. - - Returns a ``dns.rdataset.Rdataset`` object. - """ - - return from_text_list(rdclass, rdtype, ttl, text_rdatas) - - -def from_rdata_list(ttl, rdatas): - """Create an rdataset with the specified TTL, and with - the specified list of rdata objects. 
- - Returns a ``dns.rdataset.Rdataset`` object. - """ - - if len(rdatas) == 0: - raise ValueError("rdata list must not be empty") - r = None - for rd in rdatas: - if r is None: - r = Rdataset(rd.rdclass, rd.rdtype) - r.update_ttl(ttl) - r.add(rd) - return r - - -def from_rdata(ttl, *rdatas): - """Create an rdataset with the specified TTL, and with - the specified rdata objects. - - Returns a ``dns.rdataset.Rdataset`` object. - """ - - return from_rdata_list(ttl, rdatas) diff --git a/lib/dns/rdataset.pyi b/lib/dns/rdataset.pyi deleted file mode 100644 index 3efff88a..00000000 --- a/lib/dns/rdataset.pyi +++ /dev/null @@ -1,58 +0,0 @@ -from typing import Optional, Dict, List, Union -from io import BytesIO -from . import exception, name, set, rdatatype, rdata, rdataset - -class DifferingCovers(exception.DNSException): - """An attempt was made to add a DNS SIG/RRSIG whose covered type - is not the same as that of the other rdatas in the rdataset.""" - - -class IncompatibleTypes(exception.DNSException): - """An attempt was made to add DNS RR data of an incompatible type.""" - - -class Rdataset(set.Set): - def __init__(self, rdclass, rdtype, covers=rdatatype.NONE, ttl=0): - self.rdclass : int = rdclass - self.rdtype : int = rdtype - self.covers : int = covers - self.ttl : int = ttl - - def update_ttl(self, ttl : int) -> None: - ... - - def add(self, rd : rdata.Rdata, ttl : Optional[int] =None): - ... - - def union_update(self, other : Rdataset): - ... - - def intersection_update(self, other : Rdataset): - ... - - def update(self, other : Rdataset): - ... - - def to_text(self, name : Optional[name.Name] =None, origin : Optional[name.Name] =None, relativize=True, - override_rdclass : Optional[int] =None, **kw) -> bytes: - ... - - def to_wire(self, name : Optional[name.Name], file : BytesIO, compress : Optional[Dict[name.Name, int]] = None, origin : Optional[name.Name] = None, - override_rdclass : Optional[int] = None, want_shuffle=True) -> int: - ... 
- - def match(self, rdclass : int, rdtype : int, covers : int) -> bool: - ... - - -def from_text_list(rdclass : Union[int,str], rdtype : Union[int,str], ttl : int, text_rdatas : str) -> rdataset.Rdataset: - ... - -def from_text(rdclass : Union[int,str], rdtype : Union[int,str], ttl : int, *text_rdatas : str) -> rdataset.Rdataset: - ... - -def from_rdata_list(ttl : int, rdatas : List[rdata.Rdata]) -> rdataset.Rdataset: - ... - -def from_rdata(ttl : int, *rdatas : List[rdata.Rdata]) -> rdataset.Rdataset: - ... diff --git a/lib/dns/rdatatype.py b/lib/dns/rdatatype.py deleted file mode 100644 index b247bc9c..00000000 --- a/lib/dns/rdatatype.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS Rdata Types.""" - -import re - -import dns.exception - -NONE = 0 -A = 1 -NS = 2 -MD = 3 -MF = 4 -CNAME = 5 -SOA = 6 -MB = 7 -MG = 8 -MR = 9 -NULL = 10 -WKS = 11 -PTR = 12 -HINFO = 13 -MINFO = 14 -MX = 15 -TXT = 16 -RP = 17 -AFSDB = 18 -X25 = 19 -ISDN = 20 -RT = 21 -NSAP = 22 -NSAP_PTR = 23 -SIG = 24 -KEY = 25 -PX = 26 -GPOS = 27 -AAAA = 28 -LOC = 29 -NXT = 30 -SRV = 33 -NAPTR = 35 -KX = 36 -CERT = 37 -A6 = 38 -DNAME = 39 -OPT = 41 -APL = 42 -DS = 43 -SSHFP = 44 -IPSECKEY = 45 -RRSIG = 46 -NSEC = 47 -DNSKEY = 48 -DHCID = 49 -NSEC3 = 50 -NSEC3PARAM = 51 -TLSA = 52 -HIP = 55 -CDS = 59 -CDNSKEY = 60 -OPENPGPKEY = 61 -CSYNC = 62 -SPF = 99 -UNSPEC = 103 -EUI48 = 108 -EUI64 = 109 -TKEY = 249 -TSIG = 250 -IXFR = 251 -AXFR = 252 -MAILB = 253 -MAILA = 254 -ANY = 255 -URI = 256 -CAA = 257 -AVC = 258 -TA = 32768 -DLV = 32769 - -_by_text = { - 'NONE': NONE, - 'A': A, - 'NS': NS, - 'MD': MD, - 'MF': MF, - 'CNAME': CNAME, - 'SOA': SOA, - 'MB': MB, - 'MG': MG, - 'MR': MR, - 'NULL': NULL, - 'WKS': WKS, - 'PTR': PTR, - 'HINFO': HINFO, - 'MINFO': MINFO, - 'MX': MX, - 'TXT': TXT, - 'RP': RP, - 'AFSDB': AFSDB, - 'X25': X25, - 'ISDN': ISDN, - 'RT': RT, - 'NSAP': NSAP, - 'NSAP-PTR': NSAP_PTR, - 'SIG': SIG, - 'KEY': KEY, - 'PX': PX, - 'GPOS': GPOS, - 'AAAA': AAAA, - 'LOC': LOC, - 'NXT': NXT, - 'SRV': SRV, - 'NAPTR': NAPTR, - 'KX': KX, - 'CERT': CERT, - 'A6': A6, - 'DNAME': DNAME, - 'OPT': OPT, - 'APL': APL, - 'DS': DS, - 'SSHFP': SSHFP, - 'IPSECKEY': IPSECKEY, - 'RRSIG': RRSIG, - 'NSEC': NSEC, - 'DNSKEY': DNSKEY, - 'DHCID': DHCID, - 'NSEC3': NSEC3, - 'NSEC3PARAM': NSEC3PARAM, - 'TLSA': TLSA, - 'HIP': HIP, - 'CDS': CDS, - 'CDNSKEY': CDNSKEY, - 'OPENPGPKEY': OPENPGPKEY, - 'CSYNC': CSYNC, - 'SPF': SPF, - 'UNSPEC': UNSPEC, - 'EUI48': EUI48, - 'EUI64': EUI64, - 'TKEY': TKEY, - 'TSIG': TSIG, - 'IXFR': IXFR, - 'AXFR': AXFR, - 'MAILB': MAILB, - 'MAILA': MAILA, - 'ANY': ANY, - 'URI': URI, - 'CAA': CAA, - 'AVC': AVC, - 'TA': TA, - 'DLV': DLV, -} - -# We construct the inverse mapping 
programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mapping not to be true inverse. - -_by_value = {y: x for x, y in _by_text.items()} - -_metatypes = { - OPT: True -} - -_singletons = { - SOA: True, - NXT: True, - DNAME: True, - NSEC: True, - CNAME: True, -} - -_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I) - - -class UnknownRdatatype(dns.exception.DNSException): - """DNS resource record type is unknown.""" - - -def from_text(text): - """Convert text into a DNS rdata type value. - - The input text can be a defined DNS RR type mnemonic or - instance of the DNS generic type syntax. - - For example, "NS" and "TYPE2" will both result in a value of 2. - - Raises ``dns.rdatatype.UnknownRdatatype`` if the type is unknown. - - Raises ``ValueError`` if the rdata type value is not >= 0 and <= 65535. - - Returns an ``int``. - """ - - value = _by_text.get(text.upper()) - if value is None: - match = _unknown_type_pattern.match(text) - if match is None: - raise UnknownRdatatype - value = int(match.group(1)) - if value < 0 or value > 65535: - raise ValueError("type must be between >= 0 and <= 65535") - return value - - -def to_text(value): - """Convert a DNS rdata type value to text. - - If the value has a known mnemonic, it will be used, otherwise the - DNS generic type syntax will be used. - - Raises ``ValueError`` if the rdata type value is not >= 0 and <= 65535. - - Returns a ``str``. - """ - - if value < 0 or value > 65535: - raise ValueError("type must be between >= 0 and <= 65535") - text = _by_value.get(value) - if text is None: - text = 'TYPE' + repr(value) - return text - - -def is_metatype(rdtype): - """True if the specified type is a metatype. - - *rdtype* is an ``int``. - - The currently defined metatypes are TKEY, TSIG, IXFR, AXFR, MAILA, - MAILB, ANY, and OPT. - - Returns a ``bool``. 
- """ - - if rdtype >= TKEY and rdtype <= ANY or rdtype in _metatypes: - return True - return False - - -def is_singleton(rdtype): - """Is the specified type a singleton type? - - Singleton types can only have a single rdata in an rdataset, or a single - RR in an RRset. - - The currently defined singleton types are CNAME, DNAME, NSEC, NXT, and - SOA. - - *rdtype* is an ``int``. - - Returns a ``bool``. - """ - - if rdtype in _singletons: - return True - return False - - -def register_type(rdtype, rdtype_text, is_singleton=False): # pylint: disable=redefined-outer-name - """Dynamically register an rdatatype. - - *rdtype*, an ``int``, the rdatatype to register. - - *rdtype_text*, a ``text``, the textual form of the rdatatype. - - *is_singleton*, a ``bool``, indicating if the type is a singleton (i.e. - RRsets of the type can have only one member.) - """ - - _by_text[rdtype_text] = rdtype - _by_value[rdtype] = rdtype_text - if is_singleton: - _singletons[rdtype] = True diff --git a/lib/dns/rdtypes/ANY/AFSDB.py b/lib/dns/rdtypes/ANY/AFSDB.py deleted file mode 100644 index c6a700cf..00000000 --- a/lib/dns/rdtypes/ANY/AFSDB.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.mxbase - - -class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX): - - """AFSDB record - - @ivar subtype: the subtype value - @type subtype: int - @ivar hostname: the hostname name - @type hostname: dns.name.Name object""" - - # Use the property mechanism to make "subtype" an alias for the - # "preference" attribute, and "hostname" an alias for the "exchange" - # attribute. - # - # This lets us inherit the UncompressedMX implementation but lets - # the caller use appropriate attribute names for the rdata type. - # - # We probably lose some performance vs. a cut-and-paste - # implementation, but this way we don't copy code, and that's - # good. - - def get_subtype(self): - return self.preference - - def set_subtype(self, subtype): - self.preference = subtype - - subtype = property(get_subtype, set_subtype) - - def get_hostname(self): - return self.exchange - - def set_hostname(self, hostname): - self.exchange = hostname - - hostname = property(get_hostname, set_hostname) diff --git a/lib/dns/rdtypes/ANY/AVC.py b/lib/dns/rdtypes/ANY/AVC.py deleted file mode 100644 index 7f340b39..00000000 --- a/lib/dns/rdtypes/ANY/AVC.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2016 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.txtbase - - -class AVC(dns.rdtypes.txtbase.TXTBase): - - """AVC record - - @see: U{http://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template}""" diff --git a/lib/dns/rdtypes/ANY/CAA.py b/lib/dns/rdtypes/ANY/CAA.py deleted file mode 100644 index 0acf201a..00000000 --- a/lib/dns/rdtypes/ANY/CAA.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.rdata -import dns.tokenizer - - -class CAA(dns.rdata.Rdata): - - """CAA (Certification Authority Authorization) record - - @ivar flags: the flags - @type flags: int - @ivar tag: the tag - @type tag: string - @ivar value: the value - @type value: string - @see: RFC 6844""" - - __slots__ = ['flags', 'tag', 'value'] - - def __init__(self, rdclass, rdtype, flags, tag, value): - super(CAA, self).__init__(rdclass, rdtype) - self.flags = flags - self.tag = tag - self.value = value - - def to_text(self, origin=None, relativize=True, **kw): - return '%u %s "%s"' % (self.flags, - dns.rdata._escapify(self.tag), - dns.rdata._escapify(self.value)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - flags = tok.get_uint8() - tag = tok.get_string().encode() - if len(tag) > 255: - raise dns.exception.SyntaxError("tag too long") - if not tag.isalnum(): - raise dns.exception.SyntaxError("tag is not alphanumeric") - value = tok.get_string().encode() - return cls(rdclass, rdtype, flags, tag, value) - - def to_wire(self, file, compress=None, origin=None): - file.write(struct.pack('!B', self.flags)) - l = len(self.tag) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.tag) - file.write(self.value) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (flags, l) = struct.unpack('!BB', wire[current: current + 2]) - current += 2 - tag = wire[current: current + l] - value = wire[current + l:current + rdlen - 2] - return cls(rdclass, rdtype, flags, tag, value) diff --git a/lib/dns/rdtypes/ANY/CDNSKEY.py b/lib/dns/rdtypes/ANY/CDNSKEY.py deleted file mode 100644 index 653ae1be..00000000 --- a/lib/dns/rdtypes/ANY/CDNSKEY.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.dnskeybase -from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set - - -__all__ = ['flags_to_text_set', 'flags_from_text_set'] - - -class CDNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase): - - """CDNSKEY record""" diff --git a/lib/dns/rdtypes/ANY/CDS.py b/lib/dns/rdtypes/ANY/CDS.py deleted file mode 100644 index a63041dd..00000000 --- a/lib/dns/rdtypes/ANY/CDS.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.dsbase - - -class CDS(dns.rdtypes.dsbase.DSBase): - - """CDS record""" diff --git a/lib/dns/rdtypes/ANY/CERT.py b/lib/dns/rdtypes/ANY/CERT.py deleted file mode 100644 index eea27b52..00000000 --- a/lib/dns/rdtypes/ANY/CERT.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct -import base64 - -import dns.exception -import dns.dnssec -import dns.rdata -import dns.tokenizer - -_ctype_by_value = { - 1: 'PKIX', - 2: 'SPKI', - 3: 'PGP', - 253: 'URI', - 254: 'OID', -} - -_ctype_by_name = { - 'PKIX': 1, - 'SPKI': 2, - 'PGP': 3, - 'URI': 253, - 'OID': 254, -} - - -def _ctype_from_text(what): - v = _ctype_by_name.get(what) - if v is not None: - return v - return int(what) - - -def _ctype_to_text(what): - v = _ctype_by_value.get(what) - if v is not None: - return v - return str(what) - - -class CERT(dns.rdata.Rdata): - - """CERT record - - @ivar certificate_type: certificate type - @type certificate_type: int - @ivar key_tag: key tag - @type key_tag: int - @ivar algorithm: algorithm - @type algorithm: int - @ivar certificate: the certificate or CRL - @type certificate: string - @see: RFC 2538""" - - __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate'] - - def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm, - certificate): - super(CERT, self).__init__(rdclass, rdtype) - self.certificate_type = certificate_type - self.key_tag = key_tag - self.algorithm = algorithm - self.certificate = certificate - - def to_text(self, origin=None, relativize=True, **kw): - certificate_type = _ctype_to_text(self.certificate_type) - return "%s %d %s %s" % (certificate_type, self.key_tag, - dns.dnssec.algorithm_to_text(self.algorithm), - dns.rdata._base64ify(self.certificate)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - certificate_type = _ctype_from_text(tok.get_string()) - key_tag = tok.get_uint16() - algorithm = dns.dnssec.algorithm_from_text(tok.get_string()) - if algorithm < 0 or algorithm > 255: - raise dns.exception.SyntaxError("bad algorithm type") - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - b64 = b''.join(chunks) - 
certificate = base64.b64decode(b64) - return cls(rdclass, rdtype, certificate_type, key_tag, - algorithm, certificate) - - def to_wire(self, file, compress=None, origin=None): - prefix = struct.pack("!HHB", self.certificate_type, self.key_tag, - self.algorithm) - file.write(prefix) - file.write(self.certificate) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - prefix = wire[current: current + 5].unwrap() - current += 5 - rdlen -= 5 - if rdlen < 0: - raise dns.exception.FormError - (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix) - certificate = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, certificate_type, key_tag, algorithm, - certificate) diff --git a/lib/dns/rdtypes/ANY/CNAME.py b/lib/dns/rdtypes/ANY/CNAME.py deleted file mode 100644 index 11d42aa7..00000000 --- a/lib/dns/rdtypes/ANY/CNAME.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import dns.rdtypes.nsbase - - -class CNAME(dns.rdtypes.nsbase.NSBase): - - """CNAME record - - Note: although CNAME is officially a singleton type, dnspython allows - non-singleton CNAME rdatasets because such sets have been commonly - used by BIND and other nameservers for load balancing.""" diff --git a/lib/dns/rdtypes/ANY/CSYNC.py b/lib/dns/rdtypes/ANY/CSYNC.py deleted file mode 100644 index 06292fb2..00000000 --- a/lib/dns/rdtypes/ANY/CSYNC.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.rdata -import dns.rdatatype -import dns.name -from dns._compat import xrange - -class CSYNC(dns.rdata.Rdata): - - """CSYNC record - - @ivar serial: the SOA serial number - @type serial: int - @ivar flags: the CSYNC flags - @type flags: int - @ivar windows: the windowed bitmap list - @type windows: list of (window number, string) tuples""" - - __slots__ = ['serial', 'flags', 'windows'] - - def __init__(self, rdclass, rdtype, serial, flags, windows): - super(CSYNC, self).__init__(rdclass, rdtype) - self.serial = serial - self.flags = flags - self.windows = windows - - def to_text(self, origin=None, relativize=True, **kw): - text = '' - for (window, bitmap) in self.windows: - bits = [] - for i in xrange(0, len(bitmap)): - byte = bitmap[i] - for j in xrange(0, 8): - if byte & (0x80 >> j): - bits.append(dns.rdatatype.to_text(window * 256 + - i * 8 + j)) - text += (' ' + ' '.join(bits)) - return '%d %d%s' % (self.serial, self.flags, text) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - serial = tok.get_uint32() - flags = tok.get_uint16() - rdtypes = [] - while 1: - token = tok.get().unescape() - if token.is_eol_or_eof(): - break - nrdtype = dns.rdatatype.from_text(token.value) - if nrdtype == 0: - raise dns.exception.SyntaxError("CSYNC with bit 0") - if nrdtype > 65535: - raise dns.exception.SyntaxError("CSYNC with bit > 65535") - rdtypes.append(nrdtype) - rdtypes.sort() - window = 0 - octets = 0 - prior_rdtype = 0 - bitmap = bytearray(b'\0' * 32) - windows = [] - for nrdtype in rdtypes: - if nrdtype == prior_rdtype: - continue - prior_rdtype = nrdtype - new_window = nrdtype // 256 - if new_window != window: - windows.append((window, bitmap[0:octets])) - bitmap = bytearray(b'\0' * 32) - window = new_window - offset = nrdtype % 256 - byte = offset // 8 - bit = offset % 8 - octets = byte + 1 - bitmap[byte] = bitmap[byte] | (0x80 >> bit) - - windows.append((window, 
bitmap[0:octets])) - return cls(rdclass, rdtype, serial, flags, windows) - - def to_wire(self, file, compress=None, origin=None): - file.write(struct.pack('!IH', self.serial, self.flags)) - for (window, bitmap) in self.windows: - file.write(struct.pack('!BB', window, len(bitmap))) - file.write(bitmap) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - if rdlen < 6: - raise dns.exception.FormError("CSYNC too short") - (serial, flags) = struct.unpack("!IH", wire[current: current + 6]) - current += 6 - rdlen -= 6 - windows = [] - while rdlen > 0: - if rdlen < 3: - raise dns.exception.FormError("CSYNC too short") - window = wire[current] - octets = wire[current + 1] - if octets == 0 or octets > 32: - raise dns.exception.FormError("bad CSYNC octets") - current += 2 - rdlen -= 2 - if rdlen < octets: - raise dns.exception.FormError("bad CSYNC bitmap length") - bitmap = bytearray(wire[current: current + octets].unwrap()) - current += octets - rdlen -= octets - windows.append((window, bitmap)) - return cls(rdclass, rdtype, serial, flags, windows) diff --git a/lib/dns/rdtypes/ANY/DLV.py b/lib/dns/rdtypes/ANY/DLV.py deleted file mode 100644 index 16352125..00000000 --- a/lib/dns/rdtypes/ANY/DLV.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.dsbase - - -class DLV(dns.rdtypes.dsbase.DSBase): - - """DLV record""" diff --git a/lib/dns/rdtypes/ANY/DNAME.py b/lib/dns/rdtypes/ANY/DNAME.py deleted file mode 100644 index 2499283c..00000000 --- a/lib/dns/rdtypes/ANY/DNAME.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import dns.rdtypes.nsbase - - -class DNAME(dns.rdtypes.nsbase.UncompressedNS): - - """DNAME record""" - - def to_digestable(self, origin=None): - return self.target.to_digestable(origin) diff --git a/lib/dns/rdtypes/ANY/DNSKEY.py b/lib/dns/rdtypes/ANY/DNSKEY.py deleted file mode 100644 index e36f7bc5..00000000 --- a/lib/dns/rdtypes/ANY/DNSKEY.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.dnskeybase -from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set - - -__all__ = ['flags_to_text_set', 'flags_from_text_set'] - - -class DNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase): - - """DNSKEY record""" diff --git a/lib/dns/rdtypes/ANY/DS.py b/lib/dns/rdtypes/ANY/DS.py deleted file mode 100644 index 7d457b22..00000000 --- a/lib/dns/rdtypes/ANY/DS.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.dsbase - - -class DS(dns.rdtypes.dsbase.DSBase): - - """DS record""" diff --git a/lib/dns/rdtypes/ANY/EUI48.py b/lib/dns/rdtypes/ANY/EUI48.py deleted file mode 100644 index aa260e20..00000000 --- a/lib/dns/rdtypes/ANY/EUI48.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2015 Red Hat, Inc. -# Author: Petr Spacek <pspacek@redhat.com> -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import dns.rdtypes.euibase - - -class EUI48(dns.rdtypes.euibase.EUIBase): - - """EUI48 record - - @ivar fingerprint: 48-bit Extended Unique Identifier (EUI-48) - @type fingerprint: string - @see: rfc7043.txt""" - - byte_len = 6 # 0123456789ab (in hex) - text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab diff --git a/lib/dns/rdtypes/ANY/EUI64.py b/lib/dns/rdtypes/ANY/EUI64.py deleted file mode 100644 index 5eba350d..00000000 --- a/lib/dns/rdtypes/ANY/EUI64.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2015 Red Hat, Inc. -# Author: Petr Spacek <pspacek@redhat.com> -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.euibase - - -class EUI64(dns.rdtypes.euibase.EUIBase): - - """EUI64 record - - @ivar fingerprint: 64-bit Extended Unique Identifier (EUI-64) - @type fingerprint: string - @see: rfc7043.txt""" - - byte_len = 8 # 0123456789abcdef (in hex) - text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab-cd-ef diff --git a/lib/dns/rdtypes/ANY/GPOS.py b/lib/dns/rdtypes/ANY/GPOS.py deleted file mode 100644 index 422822f0..00000000 --- a/lib/dns/rdtypes/ANY/GPOS.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct - -import dns.exception -import dns.rdata -import dns.tokenizer -from dns._compat import long, text_type - - -def _validate_float_string(what): - if what[0] == b'-'[0] or what[0] == b'+'[0]: - what = what[1:] - if what.isdigit(): - return - (left, right) = what.split(b'.') - if left == b'' and right == b'': - raise dns.exception.FormError - if not left == b'' and not left.decode().isdigit(): - raise dns.exception.FormError - if not right == b'' and not right.decode().isdigit(): - raise dns.exception.FormError - - -def _sanitize(value): - if isinstance(value, text_type): - return value.encode() - return value - - -class GPOS(dns.rdata.Rdata): - - """GPOS record - - @ivar latitude: latitude - @type latitude: string - @ivar longitude: longitude - @type longitude: string - @ivar altitude: altitude - @type altitude: string - @see: RFC 1712""" - - __slots__ = ['latitude', 'longitude', 'altitude'] - - def __init__(self, rdclass, rdtype, latitude, longitude, altitude): - super(GPOS, self).__init__(rdclass, rdtype) - if isinstance(latitude, float) or \ - isinstance(latitude, int) or \ - isinstance(latitude, long): - latitude = str(latitude) - if isinstance(longitude, float) or \ - isinstance(longitude, int) or \ - 
isinstance(longitude, long): - longitude = str(longitude) - if isinstance(altitude, float) or \ - isinstance(altitude, int) or \ - isinstance(altitude, long): - altitude = str(altitude) - latitude = _sanitize(latitude) - longitude = _sanitize(longitude) - altitude = _sanitize(altitude) - _validate_float_string(latitude) - _validate_float_string(longitude) - _validate_float_string(altitude) - self.latitude = latitude - self.longitude = longitude - self.altitude = altitude - - def to_text(self, origin=None, relativize=True, **kw): - return '{} {} {}'.format(self.latitude.decode(), - self.longitude.decode(), - self.altitude.decode()) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - latitude = tok.get_string() - longitude = tok.get_string() - altitude = tok.get_string() - tok.get_eol() - return cls(rdclass, rdtype, latitude, longitude, altitude) - - def to_wire(self, file, compress=None, origin=None): - l = len(self.latitude) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.latitude) - l = len(self.longitude) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.longitude) - l = len(self.altitude) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.altitude) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - l = wire[current] - current += 1 - rdlen -= 1 - if l > rdlen: - raise dns.exception.FormError - latitude = wire[current: current + l].unwrap() - current += l - rdlen -= l - l = wire[current] - current += 1 - rdlen -= 1 - if l > rdlen: - raise dns.exception.FormError - longitude = wire[current: current + l].unwrap() - current += l - rdlen -= l - l = wire[current] - current += 1 - rdlen -= 1 - if l != rdlen: - raise dns.exception.FormError - altitude = wire[current: current + l].unwrap() - return cls(rdclass, rdtype, latitude, longitude, altitude) - - def _get_float_latitude(self): - return float(self.latitude) - - def 
_set_float_latitude(self, value): - self.latitude = str(value) - - float_latitude = property(_get_float_latitude, _set_float_latitude, - doc="latitude as a floating point value") - - def _get_float_longitude(self): - return float(self.longitude) - - def _set_float_longitude(self, value): - self.longitude = str(value) - - float_longitude = property(_get_float_longitude, _set_float_longitude, - doc="longitude as a floating point value") - - def _get_float_altitude(self): - return float(self.altitude) - - def _set_float_altitude(self, value): - self.altitude = str(value) - - float_altitude = property(_get_float_altitude, _set_float_altitude, - doc="altitude as a floating point value") diff --git a/lib/dns/rdtypes/ANY/HINFO.py b/lib/dns/rdtypes/ANY/HINFO.py deleted file mode 100644 index e4e0b34a..00000000 --- a/lib/dns/rdtypes/ANY/HINFO.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.rdata -import dns.tokenizer -from dns._compat import text_type - - -class HINFO(dns.rdata.Rdata): - - """HINFO record - - @ivar cpu: the CPU type - @type cpu: string - @ivar os: the OS type - @type os: string - @see: RFC 1035""" - - __slots__ = ['cpu', 'os'] - - def __init__(self, rdclass, rdtype, cpu, os): - super(HINFO, self).__init__(rdclass, rdtype) - if isinstance(cpu, text_type): - self.cpu = cpu.encode() - else: - self.cpu = cpu - if isinstance(os, text_type): - self.os = os.encode() - else: - self.os = os - - def to_text(self, origin=None, relativize=True, **kw): - return '"{}" "{}"'.format(dns.rdata._escapify(self.cpu), - dns.rdata._escapify(self.os)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - cpu = tok.get_string() - os = tok.get_string() - tok.get_eol() - return cls(rdclass, rdtype, cpu, os) - - def to_wire(self, file, compress=None, origin=None): - l = len(self.cpu) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.cpu) - l = len(self.os) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.os) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - l = wire[current] - current += 1 - rdlen -= 1 - if l > rdlen: - raise dns.exception.FormError - cpu = wire[current:current + l].unwrap() - current += l - rdlen -= l - l = wire[current] - current += 1 - rdlen -= 1 - if l != rdlen: - raise dns.exception.FormError - os = wire[current: current + l].unwrap() - return cls(rdclass, rdtype, cpu, os) diff --git a/lib/dns/rdtypes/ANY/HIP.py b/lib/dns/rdtypes/ANY/HIP.py deleted file mode 100644 index 7c876b2d..00000000 --- a/lib/dns/rdtypes/ANY/HIP.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2010, 2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct -import base64 -import binascii - -import dns.exception -import dns.rdata -import dns.rdatatype - - -class HIP(dns.rdata.Rdata): - - """HIP record - - @ivar hit: the host identity tag - @type hit: string - @ivar algorithm: the public key cryptographic algorithm - @type algorithm: int - @ivar key: the public key - @type key: string - @ivar servers: the rendezvous servers - @type servers: list of dns.name.Name objects - @see: RFC 5205""" - - __slots__ = ['hit', 'algorithm', 'key', 'servers'] - - def __init__(self, rdclass, rdtype, hit, algorithm, key, servers): - super(HIP, self).__init__(rdclass, rdtype) - self.hit = hit - self.algorithm = algorithm - self.key = key - self.servers = servers - - def to_text(self, origin=None, relativize=True, **kw): - hit = binascii.hexlify(self.hit).decode() - key = base64.b64encode(self.key).replace(b'\n', b'').decode() - text = u'' - servers = [] - for server in self.servers: - servers.append(server.choose_relativity(origin, relativize)) - if len(servers) > 0: - text += (u' ' + u' '.join((x.to_unicode() for x in servers))) - return u'%u %s %s%s' % (self.algorithm, hit, key, text) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - algorithm 
= tok.get_uint8() - hit = binascii.unhexlify(tok.get_string().encode()) - if len(hit) > 255: - raise dns.exception.SyntaxError("HIT too long") - key = base64.b64decode(tok.get_string().encode()) - servers = [] - while 1: - token = tok.get() - if token.is_eol_or_eof(): - break - server = dns.name.from_text(token.value, origin) - server.choose_relativity(origin, relativize) - servers.append(server) - return cls(rdclass, rdtype, hit, algorithm, key, servers) - - def to_wire(self, file, compress=None, origin=None): - lh = len(self.hit) - lk = len(self.key) - file.write(struct.pack("!BBH", lh, self.algorithm, lk)) - file.write(self.hit) - file.write(self.key) - for server in self.servers: - server.to_wire(file, None, origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (lh, algorithm, lk) = struct.unpack('!BBH', - wire[current: current + 4]) - current += 4 - rdlen -= 4 - hit = wire[current: current + lh].unwrap() - current += lh - rdlen -= lh - key = wire[current: current + lk].unwrap() - current += lk - rdlen -= lk - servers = [] - while rdlen > 0: - (server, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - current += cused - rdlen -= cused - if origin is not None: - server = server.relativize(origin) - servers.append(server) - return cls(rdclass, rdtype, hit, algorithm, key, servers) - - def choose_relativity(self, origin=None, relativize=True): - servers = [] - for server in self.servers: - server = server.choose_relativity(origin, relativize) - servers.append(server) - self.servers = servers diff --git a/lib/dns/rdtypes/ANY/ISDN.py b/lib/dns/rdtypes/ANY/ISDN.py deleted file mode 100644 index f5f5f8b9..00000000 --- a/lib/dns/rdtypes/ANY/ISDN.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct - -import dns.exception -import dns.rdata -import dns.tokenizer -from dns._compat import text_type - - -class ISDN(dns.rdata.Rdata): - - """ISDN record - - @ivar address: the ISDN address - @type address: string - @ivar subaddress: the ISDN subaddress (or '' if not present) - @type subaddress: string - @see: RFC 1183""" - - __slots__ = ['address', 'subaddress'] - - def __init__(self, rdclass, rdtype, address, subaddress): - super(ISDN, self).__init__(rdclass, rdtype) - if isinstance(address, text_type): - self.address = address.encode() - else: - self.address = address - if isinstance(address, text_type): - self.subaddress = subaddress.encode() - else: - self.subaddress = subaddress - - def to_text(self, origin=None, relativize=True, **kw): - if self.subaddress: - return '"{}" "{}"'.format(dns.rdata._escapify(self.address), - dns.rdata._escapify(self.subaddress)) - else: - return '"%s"' % dns.rdata._escapify(self.address) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - address = tok.get_string() - t = tok.get() - if not t.is_eol_or_eof(): - tok.unget(t) - subaddress = tok.get_string() - else: - tok.unget(t) - subaddress = '' - tok.get_eol() - return cls(rdclass, rdtype, 
address, subaddress) - - def to_wire(self, file, compress=None, origin=None): - l = len(self.address) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.address) - l = len(self.subaddress) - if l > 0: - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.subaddress) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - l = wire[current] - current += 1 - rdlen -= 1 - if l > rdlen: - raise dns.exception.FormError - address = wire[current: current + l].unwrap() - current += l - rdlen -= l - if rdlen > 0: - l = wire[current] - current += 1 - rdlen -= 1 - if l != rdlen: - raise dns.exception.FormError - subaddress = wire[current: current + l].unwrap() - else: - subaddress = '' - return cls(rdclass, rdtype, address, subaddress) diff --git a/lib/dns/rdtypes/ANY/LOC.py b/lib/dns/rdtypes/ANY/LOC.py deleted file mode 100644 index da9bb03a..00000000 --- a/lib/dns/rdtypes/ANY/LOC.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -from __future__ import division - -import struct - -import dns.exception -import dns.rdata -from dns._compat import long, xrange, round_py2_compat - - -_pows = tuple(long(10**i) for i in range(0, 11)) - -# default values are in centimeters -_default_size = 100.0 -_default_hprec = 1000000.0 -_default_vprec = 1000.0 - - -def _exponent_of(what, desc): - if what == 0: - return 0 - exp = None - for i in xrange(len(_pows)): - if what // _pows[i] == long(0): - exp = i - 1 - break - if exp is None or exp < 0: - raise dns.exception.SyntaxError("%s value out of bounds" % desc) - return exp - - -def _float_to_tuple(what): - if what < 0: - sign = -1 - what *= -1 - else: - sign = 1 - what = round_py2_compat(what * 3600000) - degrees = int(what // 3600000) - what -= degrees * 3600000 - minutes = int(what // 60000) - what -= minutes * 60000 - seconds = int(what // 1000) - what -= int(seconds * 1000) - what = int(what) - return (degrees, minutes, seconds, what, sign) - - -def _tuple_to_float(what): - value = float(what[0]) - value += float(what[1]) / 60.0 - value += float(what[2]) / 3600.0 - value += float(what[3]) / 3600000.0 - return float(what[4]) * value - - -def _encode_size(what, desc): - what = long(what) - exponent = _exponent_of(what, desc) & 0xF - base = what // pow(10, exponent) & 0xF - return base * 16 + exponent - - -def _decode_size(what, desc): - exponent = what & 0x0F - if exponent > 9: - raise dns.exception.SyntaxError("bad %s exponent" % desc) - base = (what & 0xF0) >> 4 - if base > 9: - raise dns.exception.SyntaxError("bad %s base" % desc) - return long(base) * pow(10, exponent) - - -class LOC(dns.rdata.Rdata): - - """LOC record - - @ivar latitude: latitude - @type latitude: (int, int, int, int, sign) tuple specifying the degrees, minutes, - seconds, milliseconds, and sign of the coordinate. - @ivar longitude: longitude - @type longitude: (int, int, int, int, sign) tuple specifying the degrees, - minutes, seconds, milliseconds, and sign of the coordinate. 
- @ivar altitude: altitude - @type altitude: float - @ivar size: size of the sphere - @type size: float - @ivar horizontal_precision: horizontal precision - @type horizontal_precision: float - @ivar vertical_precision: vertical precision - @type vertical_precision: float - @see: RFC 1876""" - - __slots__ = ['latitude', 'longitude', 'altitude', 'size', - 'horizontal_precision', 'vertical_precision'] - - def __init__(self, rdclass, rdtype, latitude, longitude, altitude, - size=_default_size, hprec=_default_hprec, - vprec=_default_vprec): - """Initialize a LOC record instance. - - The parameters I{latitude} and I{longitude} may be either a 4-tuple - of integers specifying (degrees, minutes, seconds, milliseconds), - or they may be floating point values specifying the number of - degrees. The other parameters are floats. Size, horizontal precision, - and vertical precision are specified in centimeters.""" - - super(LOC, self).__init__(rdclass, rdtype) - if isinstance(latitude, int) or isinstance(latitude, long): - latitude = float(latitude) - if isinstance(latitude, float): - latitude = _float_to_tuple(latitude) - self.latitude = latitude - if isinstance(longitude, int) or isinstance(longitude, long): - longitude = float(longitude) - if isinstance(longitude, float): - longitude = _float_to_tuple(longitude) - self.longitude = longitude - self.altitude = float(altitude) - self.size = float(size) - self.horizontal_precision = float(hprec) - self.vertical_precision = float(vprec) - - def to_text(self, origin=None, relativize=True, **kw): - if self.latitude[4] > 0: - lat_hemisphere = 'N' - else: - lat_hemisphere = 'S' - if self.longitude[4] > 0: - long_hemisphere = 'E' - else: - long_hemisphere = 'W' - text = "%d %d %d.%03d %s %d %d %d.%03d %s %0.2fm" % ( - self.latitude[0], self.latitude[1], - self.latitude[2], self.latitude[3], lat_hemisphere, - self.longitude[0], self.longitude[1], self.longitude[2], - self.longitude[3], long_hemisphere, - self.altitude / 100.0 - ) - - # 
do not print default values - if self.size != _default_size or \ - self.horizontal_precision != _default_hprec or \ - self.vertical_precision != _default_vprec: - text += " {:0.2f}m {:0.2f}m {:0.2f}m".format( - self.size / 100.0, self.horizontal_precision / 100.0, - self.vertical_precision / 100.0 - ) - return text - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - latitude = [0, 0, 0, 0, 1] - longitude = [0, 0, 0, 0, 1] - size = _default_size - hprec = _default_hprec - vprec = _default_vprec - - latitude[0] = tok.get_int() - t = tok.get_string() - if t.isdigit(): - latitude[1] = int(t) - t = tok.get_string() - if '.' in t: - (seconds, milliseconds) = t.split('.') - if not seconds.isdigit(): - raise dns.exception.SyntaxError( - 'bad latitude seconds value') - latitude[2] = int(seconds) - if latitude[2] >= 60: - raise dns.exception.SyntaxError('latitude seconds >= 60') - l = len(milliseconds) - if l == 0 or l > 3 or not milliseconds.isdigit(): - raise dns.exception.SyntaxError( - 'bad latitude milliseconds value') - if l == 1: - m = 100 - elif l == 2: - m = 10 - else: - m = 1 - latitude[3] = m * int(milliseconds) - t = tok.get_string() - elif t.isdigit(): - latitude[2] = int(t) - t = tok.get_string() - if t == 'S': - latitude[4] = -1 - elif t != 'N': - raise dns.exception.SyntaxError('bad latitude hemisphere value') - - longitude[0] = tok.get_int() - t = tok.get_string() - if t.isdigit(): - longitude[1] = int(t) - t = tok.get_string() - if '.' 
in t: - (seconds, milliseconds) = t.split('.') - if not seconds.isdigit(): - raise dns.exception.SyntaxError( - 'bad longitude seconds value') - longitude[2] = int(seconds) - if longitude[2] >= 60: - raise dns.exception.SyntaxError('longitude seconds >= 60') - l = len(milliseconds) - if l == 0 or l > 3 or not milliseconds.isdigit(): - raise dns.exception.SyntaxError( - 'bad longitude milliseconds value') - if l == 1: - m = 100 - elif l == 2: - m = 10 - else: - m = 1 - longitude[3] = m * int(milliseconds) - t = tok.get_string() - elif t.isdigit(): - longitude[2] = int(t) - t = tok.get_string() - if t == 'W': - longitude[4] = -1 - elif t != 'E': - raise dns.exception.SyntaxError('bad longitude hemisphere value') - - t = tok.get_string() - if t[-1] == 'm': - t = t[0: -1] - altitude = float(t) * 100.0 # m -> cm - - token = tok.get().unescape() - if not token.is_eol_or_eof(): - value = token.value - if value[-1] == 'm': - value = value[0: -1] - size = float(value) * 100.0 # m -> cm - token = tok.get().unescape() - if not token.is_eol_or_eof(): - value = token.value - if value[-1] == 'm': - value = value[0: -1] - hprec = float(value) * 100.0 # m -> cm - token = tok.get().unescape() - if not token.is_eol_or_eof(): - value = token.value - if value[-1] == 'm': - value = value[0: -1] - vprec = float(value) * 100.0 # m -> cm - tok.get_eol() - - return cls(rdclass, rdtype, latitude, longitude, altitude, - size, hprec, vprec) - - def to_wire(self, file, compress=None, origin=None): - milliseconds = (self.latitude[0] * 3600000 + - self.latitude[1] * 60000 + - self.latitude[2] * 1000 + - self.latitude[3]) * self.latitude[4] - latitude = long(0x80000000) + milliseconds - milliseconds = (self.longitude[0] * 3600000 + - self.longitude[1] * 60000 + - self.longitude[2] * 1000 + - self.longitude[3]) * self.longitude[4] - longitude = long(0x80000000) + milliseconds - altitude = long(self.altitude) + long(10000000) - size = _encode_size(self.size, "size") - hprec = 
_encode_size(self.horizontal_precision, "horizontal precision") - vprec = _encode_size(self.vertical_precision, "vertical precision") - wire = struct.pack("!BBBBIII", 0, size, hprec, vprec, latitude, - longitude, altitude) - file.write(wire) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (version, size, hprec, vprec, latitude, longitude, altitude) = \ - struct.unpack("!BBBBIII", wire[current: current + rdlen]) - if latitude > long(0x80000000): - latitude = float(latitude - long(0x80000000)) / 3600000 - else: - latitude = -1 * float(long(0x80000000) - latitude) / 3600000 - if latitude < -90.0 or latitude > 90.0: - raise dns.exception.FormError("bad latitude") - if longitude > long(0x80000000): - longitude = float(longitude - long(0x80000000)) / 3600000 - else: - longitude = -1 * float(long(0x80000000) - longitude) / 3600000 - if longitude < -180.0 or longitude > 180.0: - raise dns.exception.FormError("bad longitude") - altitude = float(altitude) - 10000000.0 - size = _decode_size(size, "size") - hprec = _decode_size(hprec, "horizontal precision") - vprec = _decode_size(vprec, "vertical precision") - return cls(rdclass, rdtype, latitude, longitude, altitude, - size, hprec, vprec) - - def _get_float_latitude(self): - return _tuple_to_float(self.latitude) - - def _set_float_latitude(self, value): - self.latitude = _float_to_tuple(value) - - float_latitude = property(_get_float_latitude, _set_float_latitude, - doc="latitude as a floating point value") - - def _get_float_longitude(self): - return _tuple_to_float(self.longitude) - - def _set_float_longitude(self, value): - self.longitude = _float_to_tuple(value) - - float_longitude = property(_get_float_longitude, _set_float_longitude, - doc="longitude as a floating point value") diff --git a/lib/dns/rdtypes/ANY/MX.py b/lib/dns/rdtypes/ANY/MX.py deleted file mode 100644 index 0a06494f..00000000 --- a/lib/dns/rdtypes/ANY/MX.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 
Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.mxbase - - -class MX(dns.rdtypes.mxbase.MXBase): - - """MX record""" diff --git a/lib/dns/rdtypes/ANY/NINFO.py b/lib/dns/rdtypes/ANY/NINFO.py deleted file mode 100644 index d754bc1a..00000000 --- a/lib/dns/rdtypes/ANY/NINFO.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.txtbase - - -class NINFO(dns.rdtypes.txtbase.TXTBase): - - """NINFO record - @see: draft-reid-dnsext-zs-01""" diff --git a/lib/dns/rdtypes/ANY/NS.py b/lib/dns/rdtypes/ANY/NS.py deleted file mode 100644 index f9fcf637..00000000 --- a/lib/dns/rdtypes/ANY/NS.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.nsbase - - -class NS(dns.rdtypes.nsbase.NSBase): - - """NS record""" diff --git a/lib/dns/rdtypes/ANY/NSEC.py b/lib/dns/rdtypes/ANY/NSEC.py deleted file mode 100644 index 4e3da729..00000000 --- a/lib/dns/rdtypes/ANY/NSEC.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct - -import dns.exception -import dns.rdata -import dns.rdatatype -import dns.name -from dns._compat import xrange - - -class NSEC(dns.rdata.Rdata): - - """NSEC record - - @ivar next: the next name - @type next: dns.name.Name object - @ivar windows: the windowed bitmap list - @type windows: list of (window number, string) tuples""" - - __slots__ = ['next', 'windows'] - - def __init__(self, rdclass, rdtype, next, windows): - super(NSEC, self).__init__(rdclass, rdtype) - self.next = next - self.windows = windows - - def to_text(self, origin=None, relativize=True, **kw): - next = self.next.choose_relativity(origin, relativize) - text = '' - for (window, bitmap) in self.windows: - bits = [] - for i in xrange(0, len(bitmap)): - byte = bitmap[i] - for j in xrange(0, 8): - if byte & (0x80 >> j): - bits.append(dns.rdatatype.to_text(window * 256 + - i * 8 + j)) - text += (' ' + ' '.join(bits)) - return '{}{}'.format(next, text) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - next = tok.get_name() - next = next.choose_relativity(origin, relativize) - rdtypes = [] - while 1: - token = tok.get().unescape() - if token.is_eol_or_eof(): - break - nrdtype = 
dns.rdatatype.from_text(token.value) - if nrdtype == 0: - raise dns.exception.SyntaxError("NSEC with bit 0") - if nrdtype > 65535: - raise dns.exception.SyntaxError("NSEC with bit > 65535") - rdtypes.append(nrdtype) - rdtypes.sort() - window = 0 - octets = 0 - prior_rdtype = 0 - bitmap = bytearray(b'\0' * 32) - windows = [] - for nrdtype in rdtypes: - if nrdtype == prior_rdtype: - continue - prior_rdtype = nrdtype - new_window = nrdtype // 256 - if new_window != window: - windows.append((window, bitmap[0:octets])) - bitmap = bytearray(b'\0' * 32) - window = new_window - offset = nrdtype % 256 - byte = offset // 8 - bit = offset % 8 - octets = byte + 1 - bitmap[byte] = bitmap[byte] | (0x80 >> bit) - - windows.append((window, bitmap[0:octets])) - return cls(rdclass, rdtype, next, windows) - - def to_wire(self, file, compress=None, origin=None): - self.next.to_wire(file, None, origin) - for (window, bitmap) in self.windows: - file.write(struct.pack('!BB', window, len(bitmap))) - file.write(bitmap) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (next, cused) = dns.name.from_wire(wire[: current + rdlen], current) - current += cused - rdlen -= cused - windows = [] - while rdlen > 0: - if rdlen < 3: - raise dns.exception.FormError("NSEC too short") - window = wire[current] - octets = wire[current + 1] - if octets == 0 or octets > 32: - raise dns.exception.FormError("bad NSEC octets") - current += 2 - rdlen -= 2 - if rdlen < octets: - raise dns.exception.FormError("bad NSEC bitmap length") - bitmap = bytearray(wire[current: current + octets].unwrap()) - current += octets - rdlen -= octets - windows.append((window, bitmap)) - if origin is not None: - next = next.relativize(origin) - return cls(rdclass, rdtype, next, windows) - - def choose_relativity(self, origin=None, relativize=True): - self.next = self.next.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/ANY/NSEC3.py b/lib/dns/rdtypes/ANY/NSEC3.py deleted 
file mode 100644 index 1c281c4a..00000000 --- a/lib/dns/rdtypes/ANY/NSEC3.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import base64 -import binascii -import string -import struct - -import dns.exception -import dns.rdata -import dns.rdatatype -from dns._compat import xrange, text_type, PY3 - -# pylint: disable=deprecated-string-function -if PY3: - b32_hex_to_normal = bytes.maketrans(b'0123456789ABCDEFGHIJKLMNOPQRSTUV', - b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567') - b32_normal_to_hex = bytes.maketrans(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567', - b'0123456789ABCDEFGHIJKLMNOPQRSTUV') -else: - b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV', - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567') - b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567', - '0123456789ABCDEFGHIJKLMNOPQRSTUV') -# pylint: enable=deprecated-string-function - - -# hash algorithm constants -SHA1 = 1 - -# flag constants -OPTOUT = 1 - - -class NSEC3(dns.rdata.Rdata): - - """NSEC3 record - - @ivar algorithm: the hash algorithm number - @type algorithm: int - @ivar flags: the flags - @type flags: int - @ivar iterations: the number of 
iterations - @type iterations: int - @ivar salt: the salt - @type salt: string - @ivar next: the next name hash - @type next: string - @ivar windows: the windowed bitmap list - @type windows: list of (window number, string) tuples""" - - __slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows'] - - def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt, - next, windows): - super(NSEC3, self).__init__(rdclass, rdtype) - self.algorithm = algorithm - self.flags = flags - self.iterations = iterations - if isinstance(salt, text_type): - self.salt = salt.encode() - else: - self.salt = salt - self.next = next - self.windows = windows - - def to_text(self, origin=None, relativize=True, **kw): - next = base64.b32encode(self.next).translate( - b32_normal_to_hex).lower().decode() - if self.salt == b'': - salt = '-' - else: - salt = binascii.hexlify(self.salt).decode() - text = u'' - for (window, bitmap) in self.windows: - bits = [] - for i in xrange(0, len(bitmap)): - byte = bitmap[i] - for j in xrange(0, 8): - if byte & (0x80 >> j): - bits.append(dns.rdatatype.to_text(window * 256 + - i * 8 + j)) - text += (u' ' + u' '.join(bits)) - return u'%u %u %u %s %s%s' % (self.algorithm, self.flags, - self.iterations, salt, next, text) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - algorithm = tok.get_uint8() - flags = tok.get_uint8() - iterations = tok.get_uint16() - salt = tok.get_string() - if salt == u'-': - salt = b'' - else: - salt = binascii.unhexlify(salt.encode('ascii')) - next = tok.get_string().encode( - 'ascii').upper().translate(b32_hex_to_normal) - next = base64.b32decode(next) - rdtypes = [] - while 1: - token = tok.get().unescape() - if token.is_eol_or_eof(): - break - nrdtype = dns.rdatatype.from_text(token.value) - if nrdtype == 0: - raise dns.exception.SyntaxError("NSEC3 with bit 0") - if nrdtype > 65535: - raise dns.exception.SyntaxError("NSEC3 with bit > 65535") - 
rdtypes.append(nrdtype) - rdtypes.sort() - window = 0 - octets = 0 - prior_rdtype = 0 - bitmap = bytearray(b'\0' * 32) - windows = [] - for nrdtype in rdtypes: - if nrdtype == prior_rdtype: - continue - prior_rdtype = nrdtype - new_window = nrdtype // 256 - if new_window != window: - if octets != 0: - windows.append((window, bitmap[0:octets])) - bitmap = bytearray(b'\0' * 32) - window = new_window - offset = nrdtype % 256 - byte = offset // 8 - bit = offset % 8 - octets = byte + 1 - bitmap[byte] = bitmap[byte] | (0x80 >> bit) - if octets != 0: - windows.append((window, bitmap[0:octets])) - return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, - windows) - - def to_wire(self, file, compress=None, origin=None): - l = len(self.salt) - file.write(struct.pack("!BBHB", self.algorithm, self.flags, - self.iterations, l)) - file.write(self.salt) - l = len(self.next) - file.write(struct.pack("!B", l)) - file.write(self.next) - for (window, bitmap) in self.windows: - file.write(struct.pack("!BB", window, len(bitmap))) - file.write(bitmap) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (algorithm, flags, iterations, slen) = \ - struct.unpack('!BBHB', wire[current: current + 5]) - - current += 5 - rdlen -= 5 - salt = wire[current: current + slen].unwrap() - current += slen - rdlen -= slen - nlen = wire[current] - current += 1 - rdlen -= 1 - next = wire[current: current + nlen].unwrap() - current += nlen - rdlen -= nlen - windows = [] - while rdlen > 0: - if rdlen < 3: - raise dns.exception.FormError("NSEC3 too short") - window = wire[current] - octets = wire[current + 1] - if octets == 0 or octets > 32: - raise dns.exception.FormError("bad NSEC3 octets") - current += 2 - rdlen -= 2 - if rdlen < octets: - raise dns.exception.FormError("bad NSEC3 bitmap length") - bitmap = bytearray(wire[current: current + octets].unwrap()) - current += octets - rdlen -= octets - windows.append((window, bitmap)) - return cls(rdclass, 
rdtype, algorithm, flags, iterations, salt, next, - windows) diff --git a/lib/dns/rdtypes/ANY/NSEC3PARAM.py b/lib/dns/rdtypes/ANY/NSEC3PARAM.py deleted file mode 100644 index 87c36e56..00000000 --- a/lib/dns/rdtypes/ANY/NSEC3PARAM.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct -import binascii - -import dns.exception -import dns.rdata -from dns._compat import text_type - - -class NSEC3PARAM(dns.rdata.Rdata): - - """NSEC3PARAM record - - @ivar algorithm: the hash algorithm number - @type algorithm: int - @ivar flags: the flags - @type flags: int - @ivar iterations: the number of iterations - @type iterations: int - @ivar salt: the salt - @type salt: string""" - - __slots__ = ['algorithm', 'flags', 'iterations', 'salt'] - - def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt): - super(NSEC3PARAM, self).__init__(rdclass, rdtype) - self.algorithm = algorithm - self.flags = flags - self.iterations = iterations - if isinstance(salt, text_type): - self.salt = salt.encode() - else: - self.salt = salt - - def to_text(self, origin=None, relativize=True, **kw): - if self.salt == b'': - salt = '-' - else: - salt = binascii.hexlify(self.salt).decode() - return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, - salt) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - algorithm = tok.get_uint8() - flags = tok.get_uint8() - iterations = tok.get_uint16() - salt = tok.get_string() - if salt == '-': - salt = '' - else: - salt = binascii.unhexlify(salt.encode()) - tok.get_eol() - return cls(rdclass, rdtype, algorithm, flags, iterations, salt) - - def to_wire(self, file, compress=None, origin=None): - l = len(self.salt) - file.write(struct.pack("!BBHB", self.algorithm, self.flags, - self.iterations, l)) - file.write(self.salt) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (algorithm, flags, iterations, slen) = \ - struct.unpack('!BBHB', - wire[current: current + 5]) - current += 5 - rdlen -= 5 - salt = wire[current: current + slen].unwrap() - current += slen - rdlen -= slen - if rdlen != 0: - raise dns.exception.FormError - return cls(rdclass, rdtype, algorithm, flags, iterations, salt) diff --git 
a/lib/dns/rdtypes/ANY/OPENPGPKEY.py b/lib/dns/rdtypes/ANY/OPENPGPKEY.py deleted file mode 100644 index a066cf98..00000000 --- a/lib/dns/rdtypes/ANY/OPENPGPKEY.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2016 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import base64 - -import dns.exception -import dns.rdata -import dns.tokenizer - -class OPENPGPKEY(dns.rdata.Rdata): - - """OPENPGPKEY record - - @ivar key: the key - @type key: bytes - @see: RFC 7929 - """ - - def __init__(self, rdclass, rdtype, key): - super(OPENPGPKEY, self).__init__(rdclass, rdtype) - self.key = key - - def to_text(self, origin=None, relativize=True, **kw): - return dns.rdata._base64ify(self.key) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - b64 = b''.join(chunks) - key = base64.b64decode(b64) - return cls(rdclass, rdtype, key) - - def to_wire(self, file, compress=None, origin=None): - file.write(self.key) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - key = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, key) diff --git a/lib/dns/rdtypes/ANY/PTR.py b/lib/dns/rdtypes/ANY/PTR.py deleted file mode 100644 index 20cd5076..00000000 --- a/lib/dns/rdtypes/ANY/PTR.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.nsbase - - -class PTR(dns.rdtypes.nsbase.NSBase): - - """PTR record""" diff --git a/lib/dns/rdtypes/ANY/RP.py b/lib/dns/rdtypes/ANY/RP.py deleted file mode 100644 index 8f07be90..00000000 --- a/lib/dns/rdtypes/ANY/RP.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.exception -import dns.rdata -import dns.name - - -class RP(dns.rdata.Rdata): - - """RP record - - @ivar mbox: The responsible person's mailbox - @type mbox: dns.name.Name object - @ivar txt: The owner name of a node with TXT records, or the root name - if no TXT records are associated with this RP. 
- @type txt: dns.name.Name object - @see: RFC 1183""" - - __slots__ = ['mbox', 'txt'] - - def __init__(self, rdclass, rdtype, mbox, txt): - super(RP, self).__init__(rdclass, rdtype) - self.mbox = mbox - self.txt = txt - - def to_text(self, origin=None, relativize=True, **kw): - mbox = self.mbox.choose_relativity(origin, relativize) - txt = self.txt.choose_relativity(origin, relativize) - return "{} {}".format(str(mbox), str(txt)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - mbox = tok.get_name() - txt = tok.get_name() - mbox = mbox.choose_relativity(origin, relativize) - txt = txt.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, mbox, txt) - - def to_wire(self, file, compress=None, origin=None): - self.mbox.to_wire(file, None, origin) - self.txt.to_wire(file, None, origin) - - def to_digestable(self, origin=None): - return self.mbox.to_digestable(origin) + \ - self.txt.to_digestable(origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (mbox, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - current += cused - rdlen -= cused - if rdlen <= 0: - raise dns.exception.FormError - (txt, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused != rdlen: - raise dns.exception.FormError - if origin is not None: - mbox = mbox.relativize(origin) - txt = txt.relativize(origin) - return cls(rdclass, rdtype, mbox, txt) - - def choose_relativity(self, origin=None, relativize=True): - self.mbox = self.mbox.choose_relativity(origin, relativize) - self.txt = self.txt.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/ANY/RRSIG.py b/lib/dns/rdtypes/ANY/RRSIG.py deleted file mode 100644 index d3756ece..00000000 --- a/lib/dns/rdtypes/ANY/RRSIG.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import base64 -import calendar -import struct -import time - -import dns.dnssec -import dns.exception -import dns.rdata -import dns.rdatatype - - -class BadSigTime(dns.exception.DNSException): - - """Time in DNS SIG or RRSIG resource record cannot be parsed.""" - - -def sigtime_to_posixtime(what): - if len(what) != 14: - raise BadSigTime - year = int(what[0:4]) - month = int(what[4:6]) - day = int(what[6:8]) - hour = int(what[8:10]) - minute = int(what[10:12]) - second = int(what[12:14]) - return calendar.timegm((year, month, day, hour, minute, second, - 0, 0, 0)) - - -def posixtime_to_sigtime(what): - return time.strftime('%Y%m%d%H%M%S', time.gmtime(what)) - - -class RRSIG(dns.rdata.Rdata): - - """RRSIG record - - @ivar type_covered: the rdata type this signature covers - @type type_covered: int - @ivar algorithm: the algorithm used for the sig - @type algorithm: int - @ivar labels: number of labels - @type labels: int - @ivar original_ttl: the original TTL - @type original_ttl: long - @ivar expiration: signature expiration time - @type expiration: long - @ivar inception: signature inception time - @type inception: long - @ivar key_tag: the key tag - @type key_tag: int - @ivar signer: the signer - @type signer: dns.name.Name 
object - @ivar signature: the signature - @type signature: string""" - - __slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl', - 'expiration', 'inception', 'key_tag', 'signer', - 'signature'] - - def __init__(self, rdclass, rdtype, type_covered, algorithm, labels, - original_ttl, expiration, inception, key_tag, signer, - signature): - super(RRSIG, self).__init__(rdclass, rdtype) - self.type_covered = type_covered - self.algorithm = algorithm - self.labels = labels - self.original_ttl = original_ttl - self.expiration = expiration - self.inception = inception - self.key_tag = key_tag - self.signer = signer - self.signature = signature - - def covers(self): - return self.type_covered - - def to_text(self, origin=None, relativize=True, **kw): - return '%s %d %d %d %s %s %d %s %s' % ( - dns.rdatatype.to_text(self.type_covered), - self.algorithm, - self.labels, - self.original_ttl, - posixtime_to_sigtime(self.expiration), - posixtime_to_sigtime(self.inception), - self.key_tag, - self.signer.choose_relativity(origin, relativize), - dns.rdata._base64ify(self.signature) - ) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - type_covered = dns.rdatatype.from_text(tok.get_string()) - algorithm = dns.dnssec.algorithm_from_text(tok.get_string()) - labels = tok.get_int() - original_ttl = tok.get_ttl() - expiration = sigtime_to_posixtime(tok.get_string()) - inception = sigtime_to_posixtime(tok.get_string()) - key_tag = tok.get_int() - signer = tok.get_name() - signer = signer.choose_relativity(origin, relativize) - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - b64 = b''.join(chunks) - signature = base64.b64decode(b64) - return cls(rdclass, rdtype, type_covered, algorithm, labels, - original_ttl, expiration, inception, key_tag, signer, - signature) - - def to_wire(self, file, compress=None, 
origin=None): - header = struct.pack('!HBBIIIH', self.type_covered, - self.algorithm, self.labels, - self.original_ttl, self.expiration, - self.inception, self.key_tag) - file.write(header) - self.signer.to_wire(file, None, origin) - file.write(self.signature) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - header = struct.unpack('!HBBIIIH', wire[current: current + 18]) - current += 18 - rdlen -= 18 - (signer, cused) = dns.name.from_wire(wire[: current + rdlen], current) - current += cused - rdlen -= cused - if origin is not None: - signer = signer.relativize(origin) - signature = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, header[0], header[1], header[2], - header[3], header[4], header[5], header[6], signer, - signature) - - def choose_relativity(self, origin=None, relativize=True): - self.signer = self.signer.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/ANY/RT.py b/lib/dns/rdtypes/ANY/RT.py deleted file mode 100644 index d0feb79e..00000000 --- a/lib/dns/rdtypes/ANY/RT.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import dns.rdtypes.mxbase - - -class RT(dns.rdtypes.mxbase.UncompressedDowncasingMX): - - """RT record""" diff --git a/lib/dns/rdtypes/ANY/SOA.py b/lib/dns/rdtypes/ANY/SOA.py deleted file mode 100644 index aec81cad..00000000 --- a/lib/dns/rdtypes/ANY/SOA.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.rdata -import dns.name - - -class SOA(dns.rdata.Rdata): - - """SOA record - - @ivar mname: the SOA MNAME (master name) field - @type mname: dns.name.Name object - @ivar rname: the SOA RNAME (responsible name) field - @type rname: dns.name.Name object - @ivar serial: The zone's serial number - @type serial: int - @ivar refresh: The zone's refresh value (in seconds) - @type refresh: int - @ivar retry: The zone's retry value (in seconds) - @type retry: int - @ivar expire: The zone's expiration value (in seconds) - @type expire: int - @ivar minimum: The zone's negative caching time (in seconds, called - "minimum" for historical reasons) - @type minimum: int - @see: RFC 1035""" - - __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', - 'minimum'] - - def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry, - expire, minimum): - super(SOA, self).__init__(rdclass, rdtype) - self.mname = mname - self.rname = rname - self.serial = serial - self.refresh = refresh - self.retry = retry - self.expire = expire - self.minimum = minimum - - def to_text(self, origin=None, relativize=True, **kw): - mname = self.mname.choose_relativity(origin, relativize) - rname = self.rname.choose_relativity(origin, relativize) - return '%s %s %d %d %d %d %d' % ( - mname, rname, self.serial, self.refresh, self.retry, - self.expire, self.minimum) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - mname = tok.get_name() - rname = tok.get_name() - mname = mname.choose_relativity(origin, relativize) - rname = rname.choose_relativity(origin, relativize) - serial = tok.get_uint32() - refresh = tok.get_ttl() - retry = tok.get_ttl() - expire = tok.get_ttl() - minimum = tok.get_ttl() - tok.get_eol() - return cls(rdclass, rdtype, mname, rname, serial, refresh, retry, - expire, minimum) - - def to_wire(self, file, compress=None, origin=None): - self.mname.to_wire(file, compress, 
origin) - self.rname.to_wire(file, compress, origin) - five_ints = struct.pack('!IIIII', self.serial, self.refresh, - self.retry, self.expire, self.minimum) - file.write(five_ints) - - def to_digestable(self, origin=None): - return self.mname.to_digestable(origin) + \ - self.rname.to_digestable(origin) + \ - struct.pack('!IIIII', self.serial, self.refresh, - self.retry, self.expire, self.minimum) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current) - current += cused - rdlen -= cused - (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current) - current += cused - rdlen -= cused - if rdlen != 20: - raise dns.exception.FormError - five_ints = struct.unpack('!IIIII', - wire[current: current + rdlen]) - if origin is not None: - mname = mname.relativize(origin) - rname = rname.relativize(origin) - return cls(rdclass, rdtype, mname, rname, - five_ints[0], five_ints[1], five_ints[2], five_ints[3], - five_ints[4]) - - def choose_relativity(self, origin=None, relativize=True): - self.mname = self.mname.choose_relativity(origin, relativize) - self.rname = self.rname.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/ANY/SPF.py b/lib/dns/rdtypes/ANY/SPF.py deleted file mode 100644 index 41dee623..00000000 --- a/lib/dns/rdtypes/ANY/SPF.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.txtbase - - -class SPF(dns.rdtypes.txtbase.TXTBase): - - """SPF record - - @see: RFC 4408""" diff --git a/lib/dns/rdtypes/ANY/SSHFP.py b/lib/dns/rdtypes/ANY/SSHFP.py deleted file mode 100644 index c18311e9..00000000 --- a/lib/dns/rdtypes/ANY/SSHFP.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct -import binascii - -import dns.rdata -import dns.rdatatype - - -class SSHFP(dns.rdata.Rdata): - - """SSHFP record - - @ivar algorithm: the algorithm - @type algorithm: int - @ivar fp_type: the digest type - @type fp_type: int - @ivar fingerprint: the fingerprint - @type fingerprint: string - @see: draft-ietf-secsh-dns-05.txt""" - - __slots__ = ['algorithm', 'fp_type', 'fingerprint'] - - def __init__(self, rdclass, rdtype, algorithm, fp_type, - fingerprint): - super(SSHFP, self).__init__(rdclass, rdtype) - self.algorithm = algorithm - self.fp_type = fp_type - self.fingerprint = fingerprint - - def to_text(self, origin=None, relativize=True, **kw): - return '%d %d %s' % (self.algorithm, - self.fp_type, - dns.rdata._hexify(self.fingerprint, - chunksize=128)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - algorithm = tok.get_uint8() - fp_type = tok.get_uint8() - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - fingerprint = b''.join(chunks) - fingerprint = binascii.unhexlify(fingerprint) - return cls(rdclass, rdtype, algorithm, fp_type, fingerprint) - - def to_wire(self, file, compress=None, origin=None): - header = struct.pack("!BB", self.algorithm, self.fp_type) - file.write(header) - file.write(self.fingerprint) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - header = struct.unpack("!BB", wire[current: current + 2]) - current += 2 - rdlen -= 2 - fingerprint = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, header[0], header[1], fingerprint) diff --git a/lib/dns/rdtypes/ANY/TLSA.py b/lib/dns/rdtypes/ANY/TLSA.py deleted file mode 100644 index a135c2b3..00000000 --- a/lib/dns/rdtypes/ANY/TLSA.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# 
Copyright (C) 2005-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct -import binascii - -import dns.rdata -import dns.rdatatype - - -class TLSA(dns.rdata.Rdata): - - """TLSA record - - @ivar usage: The certificate usage - @type usage: int - @ivar selector: The selector field - @type selector: int - @ivar mtype: The 'matching type' field - @type mtype: int - @ivar cert: The 'Certificate Association Data' field - @type cert: string - @see: RFC 6698""" - - __slots__ = ['usage', 'selector', 'mtype', 'cert'] - - def __init__(self, rdclass, rdtype, usage, selector, - mtype, cert): - super(TLSA, self).__init__(rdclass, rdtype) - self.usage = usage - self.selector = selector - self.mtype = mtype - self.cert = cert - - def to_text(self, origin=None, relativize=True, **kw): - return '%d %d %d %s' % (self.usage, - self.selector, - self.mtype, - dns.rdata._hexify(self.cert, - chunksize=128)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - usage = tok.get_uint8() - selector = tok.get_uint8() - mtype = tok.get_uint8() - cert_chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - cert_chunks.append(t.value.encode()) 
- cert = b''.join(cert_chunks) - cert = binascii.unhexlify(cert) - return cls(rdclass, rdtype, usage, selector, mtype, cert) - - def to_wire(self, file, compress=None, origin=None): - header = struct.pack("!BBB", self.usage, self.selector, self.mtype) - file.write(header) - file.write(self.cert) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - header = struct.unpack("!BBB", wire[current: current + 3]) - current += 3 - rdlen -= 3 - cert = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, header[0], header[1], header[2], cert) diff --git a/lib/dns/rdtypes/ANY/TXT.py b/lib/dns/rdtypes/ANY/TXT.py deleted file mode 100644 index c5ae919c..00000000 --- a/lib/dns/rdtypes/ANY/TXT.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import dns.rdtypes.txtbase - - -class TXT(dns.rdtypes.txtbase.TXTBase): - - """TXT record""" diff --git a/lib/dns/rdtypes/ANY/URI.py b/lib/dns/rdtypes/ANY/URI.py deleted file mode 100644 index f5b65ed6..00000000 --- a/lib/dns/rdtypes/ANY/URI.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# Copyright (C) 2015 Red Hat, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.rdata -import dns.name -from dns._compat import text_type - - -class URI(dns.rdata.Rdata): - - """URI record - - @ivar priority: the priority - @type priority: int - @ivar weight: the weight - @type weight: int - @ivar target: the target host - @type target: dns.name.Name object - @see: draft-faltstrom-uri-13""" - - __slots__ = ['priority', 'weight', 'target'] - - def __init__(self, rdclass, rdtype, priority, weight, target): - super(URI, self).__init__(rdclass, rdtype) - self.priority = priority - self.weight = weight - if len(target) < 1: - raise dns.exception.SyntaxError("URI target cannot be empty") - if isinstance(target, text_type): - self.target = target.encode() - else: - self.target = target - - def to_text(self, origin=None, relativize=True, **kw): - return '%d %d "%s"' % (self.priority, self.weight, - self.target.decode()) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - priority = tok.get_uint16() - weight = tok.get_uint16() - target = tok.get().unescape() - if not (target.is_quoted_string() or target.is_identifier()): - raise dns.exception.SyntaxError("URI target must be a string") - tok.get_eol() - return cls(rdclass, rdtype, priority, weight, target.value) - - def to_wire(self, file, compress=None, origin=None): - two_ints = struct.pack("!HH", self.priority, self.weight) - file.write(two_ints) - file.write(self.target) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - if rdlen < 5: - raise dns.exception.FormError('URI RR is shorter than 5 octets') - - (priority, weight) = struct.unpack('!HH', wire[current: current + 4]) - current += 4 - rdlen -= 4 - target = wire[current: current + rdlen] - current += rdlen - - return cls(rdclass, rdtype, priority, weight, target) diff --git a/lib/dns/rdtypes/ANY/X25.py b/lib/dns/rdtypes/ANY/X25.py deleted file mode 100644 index e530a2c2..00000000 --- a/lib/dns/rdtypes/ANY/X25.py 
+++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct - -import dns.exception -import dns.rdata -import dns.tokenizer -from dns._compat import text_type - - -class X25(dns.rdata.Rdata): - - """X25 record - - @ivar address: the PSDN address - @type address: string - @see: RFC 1183""" - - __slots__ = ['address'] - - def __init__(self, rdclass, rdtype, address): - super(X25, self).__init__(rdclass, rdtype) - if isinstance(address, text_type): - self.address = address.encode() - else: - self.address = address - - def to_text(self, origin=None, relativize=True, **kw): - return '"%s"' % dns.rdata._escapify(self.address) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - address = tok.get_string() - tok.get_eol() - return cls(rdclass, rdtype, address) - - def to_wire(self, file, compress=None, origin=None): - l = len(self.address) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(self.address) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - l = wire[current] - current += 1 - rdlen -= 1 - if l != rdlen: - raise 
dns.exception.FormError - address = wire[current: current + l].unwrap() - return cls(rdclass, rdtype, address) diff --git a/lib/dns/rdtypes/ANY/__init__.py b/lib/dns/rdtypes/ANY/__init__.py deleted file mode 100644 index ca41ef80..00000000 --- a/lib/dns/rdtypes/ANY/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Class ANY (generic) rdata type classes.""" - -__all__ = [ - 'AFSDB', - 'AVC', - 'CAA', - 'CDNSKEY', - 'CDS', - 'CERT', - 'CNAME', - 'CSYNC', - 'DLV', - 'DNAME', - 'DNSKEY', - 'DS', - 'EUI48', - 'EUI64', - 'GPOS', - 'HINFO', - 'HIP', - 'ISDN', - 'LOC', - 'MX', - 'NS', - 'NSEC', - 'NSEC3', - 'NSEC3PARAM', - 'OPENPGPKEY', - 'PTR', - 'RP', - 'RRSIG', - 'RT', - 'SOA', - 'SPF', - 'SSHFP', - 'TLSA', - 'TXT', - 'URI', - 'X25', -] diff --git a/lib/dns/rdtypes/CH/A.py b/lib/dns/rdtypes/CH/A.py deleted file mode 100644 index e65d192d..00000000 --- a/lib/dns/rdtypes/CH/A.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.mxbase -import struct - -class A(dns.rdtypes.mxbase.MXBase): - - """A record for Chaosnet - @ivar domain: the domain of the address - @type domain: dns.name.Name object - @ivar address: the 16-bit address - @type address: int""" - - __slots__ = ['domain', 'address'] - - def __init__(self, rdclass, rdtype, address, domain): - super(A, self).__init__(rdclass, rdtype, address, domain) - self.domain = domain - self.address = address - - def to_text(self, origin=None, relativize=True, **kw): - domain = self.domain.choose_relativity(origin, relativize) - return '%s %o' % (domain, self.address) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - domain = tok.get_name() - address = tok.get_uint16(base=8) - domain = domain.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, address, domain) - - def to_wire(self, file, compress=None, origin=None): - self.domain.to_wire(file, compress, origin) - pref = struct.pack("!H", self.address) - file.write(pref) - - def to_digestable(self, origin=None): - return self.domain.to_digestable(origin) + \ - struct.pack("!H", self.address) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, 
origin=None): - (domain, cused) = dns.name.from_wire(wire[: current + rdlen-2], - current) - current += cused - (address,) = struct.unpack('!H', wire[current: current + 2]) - if cused+2 != rdlen: - raise dns.exception.FormError - if origin is not None: - domain = domain.relativize(origin) - return cls(rdclass, rdtype, address, domain) - - def choose_relativity(self, origin=None, relativize=True): - self.domain = self.domain.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/CH/__init__.py b/lib/dns/rdtypes/CH/__init__.py deleted file mode 100644 index 7184a733..00000000 --- a/lib/dns/rdtypes/CH/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Class CH rdata type classes.""" - -__all__ = [ - 'A', -] diff --git a/lib/dns/rdtypes/IN/A.py b/lib/dns/rdtypes/IN/A.py deleted file mode 100644 index 89989824..00000000 --- a/lib/dns/rdtypes/IN/A.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.exception -import dns.ipv4 -import dns.rdata -import dns.tokenizer - - -class A(dns.rdata.Rdata): - - """A record. - - @ivar address: an IPv4 address - @type address: string (in the standard "dotted quad" format)""" - - __slots__ = ['address'] - - def __init__(self, rdclass, rdtype, address): - super(A, self).__init__(rdclass, rdtype) - # check that it's OK - dns.ipv4.inet_aton(address) - self.address = address - - def to_text(self, origin=None, relativize=True, **kw): - return self.address - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - address = tok.get_identifier() - tok.get_eol() - return cls(rdclass, rdtype, address) - - def to_wire(self, file, compress=None, origin=None): - file.write(dns.ipv4.inet_aton(self.address)) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - address = dns.ipv4.inet_ntoa(wire[current: current + rdlen]) - return cls(rdclass, rdtype, address) diff --git a/lib/dns/rdtypes/IN/AAAA.py b/lib/dns/rdtypes/IN/AAAA.py deleted file mode 100644 index a77c5bf2..00000000 --- a/lib/dns/rdtypes/IN/AAAA.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# 
Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.exception -import dns.inet -import dns.rdata -import dns.tokenizer - - -class AAAA(dns.rdata.Rdata): - - """AAAA record. - - @ivar address: an IPv6 address - @type address: string (in the standard IPv6 format)""" - - __slots__ = ['address'] - - def __init__(self, rdclass, rdtype, address): - super(AAAA, self).__init__(rdclass, rdtype) - # check that it's OK - dns.inet.inet_pton(dns.inet.AF_INET6, address) - self.address = address - - def to_text(self, origin=None, relativize=True, **kw): - return self.address - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - address = tok.get_identifier() - tok.get_eol() - return cls(rdclass, rdtype, address) - - def to_wire(self, file, compress=None, origin=None): - file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.address)) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - address = dns.inet.inet_ntop(dns.inet.AF_INET6, - wire[current: current + rdlen]) - return cls(rdclass, rdtype, address) diff --git a/lib/dns/rdtypes/IN/APL.py b/lib/dns/rdtypes/IN/APL.py deleted file mode 100644 index 48faf88a..00000000 --- a/lib/dns/rdtypes/IN/APL.py +++ 
/dev/null @@ -1,165 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import binascii -import codecs -import struct - -import dns.exception -import dns.inet -import dns.rdata -import dns.tokenizer -from dns._compat import xrange, maybe_chr - - -class APLItem(object): - - """An APL list item. - - @ivar family: the address family (IANA address family registry) - @type family: int - @ivar negation: is this item negated? 
- @type negation: bool - @ivar address: the address - @type address: string - @ivar prefix: the prefix length - @type prefix: int - """ - - __slots__ = ['family', 'negation', 'address', 'prefix'] - - def __init__(self, family, negation, address, prefix): - self.family = family - self.negation = negation - self.address = address - self.prefix = prefix - - def __str__(self): - if self.negation: - return "!%d:%s/%s" % (self.family, self.address, self.prefix) - else: - return "%d:%s/%s" % (self.family, self.address, self.prefix) - - def to_wire(self, file): - if self.family == 1: - address = dns.inet.inet_pton(dns.inet.AF_INET, self.address) - elif self.family == 2: - address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address) - else: - address = binascii.unhexlify(self.address) - # - # Truncate least significant zero bytes. - # - last = 0 - for i in xrange(len(address) - 1, -1, -1): - if address[i] != maybe_chr(0): - last = i + 1 - break - address = address[0: last] - l = len(address) - assert l < 128 - if self.negation: - l |= 0x80 - header = struct.pack('!HBB', self.family, self.prefix, l) - file.write(header) - file.write(address) - - -class APL(dns.rdata.Rdata): - - """APL record. 
- - @ivar items: a list of APL items - @type items: list of APL_Item - @see: RFC 3123""" - - __slots__ = ['items'] - - def __init__(self, rdclass, rdtype, items): - super(APL, self).__init__(rdclass, rdtype) - self.items = items - - def to_text(self, origin=None, relativize=True, **kw): - return ' '.join(map(str, self.items)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - items = [] - while 1: - token = tok.get().unescape() - if token.is_eol_or_eof(): - break - item = token.value - if item[0] == '!': - negation = True - item = item[1:] - else: - negation = False - (family, rest) = item.split(':', 1) - family = int(family) - (address, prefix) = rest.split('/', 1) - prefix = int(prefix) - item = APLItem(family, negation, address, prefix) - items.append(item) - - return cls(rdclass, rdtype, items) - - def to_wire(self, file, compress=None, origin=None): - for item in self.items: - item.to_wire(file) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - - items = [] - while 1: - if rdlen == 0: - break - if rdlen < 4: - raise dns.exception.FormError - header = struct.unpack('!HBB', wire[current: current + 4]) - afdlen = header[2] - if afdlen > 127: - negation = True - afdlen -= 128 - else: - negation = False - current += 4 - rdlen -= 4 - if rdlen < afdlen: - raise dns.exception.FormError - address = wire[current: current + afdlen].unwrap() - l = len(address) - if header[0] == 1: - if l < 4: - address += b'\x00' * (4 - l) - address = dns.inet.inet_ntop(dns.inet.AF_INET, address) - elif header[0] == 2: - if l < 16: - address += b'\x00' * (16 - l) - address = dns.inet.inet_ntop(dns.inet.AF_INET6, address) - else: - # - # This isn't really right according to the RFC, but it - # seems better than throwing an exception - # - address = codecs.encode(address, 'hex_codec') - current += afdlen - rdlen -= afdlen - item = APLItem(header[0], negation, address, header[1]) - items.append(item) - 
return cls(rdclass, rdtype, items) diff --git a/lib/dns/rdtypes/IN/DHCID.py b/lib/dns/rdtypes/IN/DHCID.py deleted file mode 100644 index cec64590..00000000 --- a/lib/dns/rdtypes/IN/DHCID.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import base64 - -import dns.exception - - -class DHCID(dns.rdata.Rdata): - - """DHCID record - - @ivar data: the data (the content of the RR is opaque as far as the - DNS is concerned) - @type data: string - @see: RFC 4701""" - - __slots__ = ['data'] - - def __init__(self, rdclass, rdtype, data): - super(DHCID, self).__init__(rdclass, rdtype) - self.data = data - - def to_text(self, origin=None, relativize=True, **kw): - return dns.rdata._base64ify(self.data) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - b64 = b''.join(chunks) - data = base64.b64decode(b64) - return cls(rdclass, rdtype, data) - - def to_wire(self, file, compress=None, origin=None): - file.write(self.data) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - data = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, data) diff --git a/lib/dns/rdtypes/IN/IPSECKEY.py b/lib/dns/rdtypes/IN/IPSECKEY.py deleted file mode 100644 index 8f49ba13..00000000 --- a/lib/dns/rdtypes/IN/IPSECKEY.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct -import base64 - -import dns.exception -import dns.inet -import dns.name - - -class IPSECKEY(dns.rdata.Rdata): - - """IPSECKEY record - - @ivar precedence: the precedence for this key data - @type precedence: int - @ivar gateway_type: the gateway type - @type gateway_type: int - @ivar algorithm: the algorithm to use - @type algorithm: int - @ivar gateway: the public key - @type gateway: None, IPv4 address, IPV6 address, or domain name - @ivar key: the public key - @type key: string - @see: RFC 4025""" - - __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key'] - - def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm, - gateway, key): - super(IPSECKEY, self).__init__(rdclass, rdtype) - if gateway_type == 0: - if gateway != '.' and gateway is not None: - raise SyntaxError('invalid gateway for gateway type 0') - gateway = None - elif gateway_type == 1: - # check that it's OK - dns.inet.inet_pton(dns.inet.AF_INET, gateway) - elif gateway_type == 2: - # check that it's OK - dns.inet.inet_pton(dns.inet.AF_INET6, gateway) - elif gateway_type == 3: - pass - else: - raise SyntaxError( - 'invalid IPSECKEY gateway type: %d' % gateway_type) - self.precedence = precedence - self.gateway_type = gateway_type - self.algorithm = algorithm - self.gateway = gateway - self.key = key - - def to_text(self, origin=None, relativize=True, **kw): - if self.gateway_type == 0: - gateway = '.' 
- elif self.gateway_type == 1: - gateway = self.gateway - elif self.gateway_type == 2: - gateway = self.gateway - elif self.gateway_type == 3: - gateway = str(self.gateway.choose_relativity(origin, relativize)) - else: - raise ValueError('invalid gateway type') - return '%d %d %d %s %s' % (self.precedence, self.gateway_type, - self.algorithm, gateway, - dns.rdata._base64ify(self.key)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - precedence = tok.get_uint8() - gateway_type = tok.get_uint8() - algorithm = tok.get_uint8() - if gateway_type == 3: - gateway = tok.get_name().choose_relativity(origin, relativize) - else: - gateway = tok.get_string() - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - b64 = b''.join(chunks) - key = base64.b64decode(b64) - return cls(rdclass, rdtype, precedence, gateway_type, algorithm, - gateway, key) - - def to_wire(self, file, compress=None, origin=None): - header = struct.pack("!BBB", self.precedence, self.gateway_type, - self.algorithm) - file.write(header) - if self.gateway_type == 0: - pass - elif self.gateway_type == 1: - file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway)) - elif self.gateway_type == 2: - file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway)) - elif self.gateway_type == 3: - self.gateway.to_wire(file, None, origin) - else: - raise ValueError('invalid gateway type') - file.write(self.key) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - if rdlen < 3: - raise dns.exception.FormError - header = struct.unpack('!BBB', wire[current: current + 3]) - gateway_type = header[1] - current += 3 - rdlen -= 3 - if gateway_type == 0: - gateway = None - elif gateway_type == 1: - gateway = dns.inet.inet_ntop(dns.inet.AF_INET, - wire[current: current + 4]) - current += 4 - rdlen -= 4 - elif 
gateway_type == 2: - gateway = dns.inet.inet_ntop(dns.inet.AF_INET6, - wire[current: current + 16]) - current += 16 - rdlen -= 16 - elif gateway_type == 3: - (gateway, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - current += cused - rdlen -= cused - else: - raise dns.exception.FormError('invalid IPSECKEY gateway type') - key = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, header[0], gateway_type, header[2], - gateway, key) diff --git a/lib/dns/rdtypes/IN/KX.py b/lib/dns/rdtypes/IN/KX.py deleted file mode 100644 index 1318a582..00000000 --- a/lib/dns/rdtypes/IN/KX.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.mxbase - - -class KX(dns.rdtypes.mxbase.UncompressedMX): - - """KX record""" diff --git a/lib/dns/rdtypes/IN/NAPTR.py b/lib/dns/rdtypes/IN/NAPTR.py deleted file mode 100644 index 32fa4745..00000000 --- a/lib/dns/rdtypes/IN/NAPTR.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct - -import dns.exception -import dns.name -import dns.rdata -from dns._compat import xrange, text_type - - -def _write_string(file, s): - l = len(s) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(s) - - -def _sanitize(value): - if isinstance(value, text_type): - return value.encode() - return value - - -class NAPTR(dns.rdata.Rdata): - - """NAPTR record - - @ivar order: order - @type order: int - @ivar preference: preference - @type preference: int - @ivar flags: flags - @type flags: string - @ivar service: service - @type service: string - @ivar regexp: regular expression - @type regexp: string - @ivar replacement: replacement name - @type replacement: dns.name.Name object - @see: RFC 3403""" - - __slots__ = ['order', 'preference', 'flags', 'service', 'regexp', - 'replacement'] - - def __init__(self, rdclass, rdtype, order, preference, flags, service, - regexp, replacement): - super(NAPTR, self).__init__(rdclass, rdtype) - self.flags = _sanitize(flags) - self.service = _sanitize(service) - self.regexp = _sanitize(regexp) - self.order = order - self.preference = preference - self.replacement = replacement - - def to_text(self, origin=None, relativize=True, **kw): - replacement = 
self.replacement.choose_relativity(origin, relativize) - return '%d %d "%s" "%s" "%s" %s' % \ - (self.order, self.preference, - dns.rdata._escapify(self.flags), - dns.rdata._escapify(self.service), - dns.rdata._escapify(self.regexp), - replacement) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - order = tok.get_uint16() - preference = tok.get_uint16() - flags = tok.get_string() - service = tok.get_string() - regexp = tok.get_string() - replacement = tok.get_name() - replacement = replacement.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, order, preference, flags, service, - regexp, replacement) - - def to_wire(self, file, compress=None, origin=None): - two_ints = struct.pack("!HH", self.order, self.preference) - file.write(two_ints) - _write_string(file, self.flags) - _write_string(file, self.service) - _write_string(file, self.regexp) - self.replacement.to_wire(file, compress, origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (order, preference) = struct.unpack('!HH', wire[current: current + 4]) - current += 4 - rdlen -= 4 - strings = [] - for i in xrange(3): - l = wire[current] - current += 1 - rdlen -= 1 - if l > rdlen or rdlen < 0: - raise dns.exception.FormError - s = wire[current: current + l].unwrap() - current += l - rdlen -= l - strings.append(s) - (replacement, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused != rdlen: - raise dns.exception.FormError - if origin is not None: - replacement = replacement.relativize(origin) - return cls(rdclass, rdtype, order, preference, strings[0], strings[1], - strings[2], replacement) - - def choose_relativity(self, origin=None, relativize=True): - self.replacement = self.replacement.choose_relativity(origin, - relativize) diff --git a/lib/dns/rdtypes/IN/NSAP.py b/lib/dns/rdtypes/IN/NSAP.py deleted file mode 100644 index 336befc7..00000000 --- 
a/lib/dns/rdtypes/IN/NSAP.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import binascii - -import dns.exception -import dns.rdata -import dns.tokenizer - - -class NSAP(dns.rdata.Rdata): - - """NSAP record. 
- - @ivar address: a NASP - @type address: string - @see: RFC 1706""" - - __slots__ = ['address'] - - def __init__(self, rdclass, rdtype, address): - super(NSAP, self).__init__(rdclass, rdtype) - self.address = address - - def to_text(self, origin=None, relativize=True, **kw): - return "0x%s" % binascii.hexlify(self.address).decode() - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - address = tok.get_string() - tok.get_eol() - if address[0:2] != '0x': - raise dns.exception.SyntaxError('string does not start with 0x') - address = address[2:].replace('.', '') - if len(address) % 2 != 0: - raise dns.exception.SyntaxError('hexstring has odd length') - address = binascii.unhexlify(address.encode()) - return cls(rdclass, rdtype, address) - - def to_wire(self, file, compress=None, origin=None): - file.write(self.address) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - address = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, address) diff --git a/lib/dns/rdtypes/IN/NSAP_PTR.py b/lib/dns/rdtypes/IN/NSAP_PTR.py deleted file mode 100644 index a5b66c80..00000000 --- a/lib/dns/rdtypes/IN/NSAP_PTR.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import dns.rdtypes.nsbase - - -class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS): - - """NSAP-PTR record""" diff --git a/lib/dns/rdtypes/IN/PX.py b/lib/dns/rdtypes/IN/PX.py deleted file mode 100644 index 2dbaee6c..00000000 --- a/lib/dns/rdtypes/IN/PX.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct - -import dns.exception -import dns.rdata -import dns.name - - -class PX(dns.rdata.Rdata): - - """PX record. 
- - @ivar preference: the preference value - @type preference: int - @ivar map822: the map822 name - @type map822: dns.name.Name object - @ivar mapx400: the mapx400 name - @type mapx400: dns.name.Name object - @see: RFC 2163""" - - __slots__ = ['preference', 'map822', 'mapx400'] - - def __init__(self, rdclass, rdtype, preference, map822, mapx400): - super(PX, self).__init__(rdclass, rdtype) - self.preference = preference - self.map822 = map822 - self.mapx400 = mapx400 - - def to_text(self, origin=None, relativize=True, **kw): - map822 = self.map822.choose_relativity(origin, relativize) - mapx400 = self.mapx400.choose_relativity(origin, relativize) - return '%d %s %s' % (self.preference, map822, mapx400) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - preference = tok.get_uint16() - map822 = tok.get_name() - map822 = map822.choose_relativity(origin, relativize) - mapx400 = tok.get_name(None) - mapx400 = mapx400.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, preference, map822, mapx400) - - def to_wire(self, file, compress=None, origin=None): - pref = struct.pack("!H", self.preference) - file.write(pref) - self.map822.to_wire(file, None, origin) - self.mapx400.to_wire(file, None, origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (preference, ) = struct.unpack('!H', wire[current: current + 2]) - current += 2 - rdlen -= 2 - (map822, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused > rdlen: - raise dns.exception.FormError - current += cused - rdlen -= cused - if origin is not None: - map822 = map822.relativize(origin) - (mapx400, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused != rdlen: - raise dns.exception.FormError - if origin is not None: - mapx400 = mapx400.relativize(origin) - return cls(rdclass, rdtype, preference, map822, mapx400) - - def choose_relativity(self, origin=None, 
relativize=True): - self.map822 = self.map822.choose_relativity(origin, relativize) - self.mapx400 = self.mapx400.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/IN/SRV.py b/lib/dns/rdtypes/IN/SRV.py deleted file mode 100644 index b2c1bc9f..00000000 --- a/lib/dns/rdtypes/IN/SRV.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import struct - -import dns.exception -import dns.rdata -import dns.name - - -class SRV(dns.rdata.Rdata): - - """SRV record - - @ivar priority: the priority - @type priority: int - @ivar weight: the weight - @type weight: int - @ivar port: the port of the service - @type port: int - @ivar target: the target host - @type target: dns.name.Name object - @see: RFC 2782""" - - __slots__ = ['priority', 'weight', 'port', 'target'] - - def __init__(self, rdclass, rdtype, priority, weight, port, target): - super(SRV, self).__init__(rdclass, rdtype) - self.priority = priority - self.weight = weight - self.port = port - self.target = target - - def to_text(self, origin=None, relativize=True, **kw): - target = self.target.choose_relativity(origin, relativize) - return '%d %d %d %s' % (self.priority, self.weight, self.port, - target) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - priority = tok.get_uint16() - weight = tok.get_uint16() - port = tok.get_uint16() - target = tok.get_name(None) - target = target.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, priority, weight, port, target) - - def to_wire(self, file, compress=None, origin=None): - three_ints = struct.pack("!HHH", self.priority, self.weight, self.port) - file.write(three_ints) - self.target.to_wire(file, compress, origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (priority, weight, port) = struct.unpack('!HHH', - wire[current: current + 6]) - current += 6 - rdlen -= 6 - (target, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused != rdlen: - raise dns.exception.FormError - if origin is not None: - target = target.relativize(origin) - return cls(rdclass, rdtype, priority, weight, port, target) - - def choose_relativity(self, origin=None, relativize=True): - self.target = self.target.choose_relativity(origin, relativize) diff --git a/lib/dns/rdtypes/IN/WKS.py 
b/lib/dns/rdtypes/IN/WKS.py deleted file mode 100644 index 96f98ada..00000000 --- a/lib/dns/rdtypes/IN/WKS.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import socket -import struct - -import dns.ipv4 -import dns.rdata -from dns._compat import xrange - -_proto_tcp = socket.getprotobyname('tcp') -_proto_udp = socket.getprotobyname('udp') - - -class WKS(dns.rdata.Rdata): - - """WKS record - - @ivar address: the address - @type address: string - @ivar protocol: the protocol - @type protocol: int - @ivar bitmap: the bitmap - @type bitmap: string - @see: RFC 1035""" - - __slots__ = ['address', 'protocol', 'bitmap'] - - def __init__(self, rdclass, rdtype, address, protocol, bitmap): - super(WKS, self).__init__(rdclass, rdtype) - self.address = address - self.protocol = protocol - if not isinstance(bitmap, bytearray): - self.bitmap = bytearray(bitmap) - else: - self.bitmap = bitmap - - def to_text(self, origin=None, relativize=True, **kw): - bits = [] - for i in xrange(0, len(self.bitmap)): - byte = self.bitmap[i] - for j in xrange(0, 8): - if byte & (0x80 >> j): - bits.append(str(i * 8 + j)) - text = ' '.join(bits) - return '%s %d %s' % (self.address, self.protocol, text) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - address = tok.get_string() - protocol = tok.get_string() - if protocol.isdigit(): - protocol = int(protocol) - else: - protocol = socket.getprotobyname(protocol) - bitmap = bytearray() - while 1: - token = tok.get().unescape() - if token.is_eol_or_eof(): - break - if token.value.isdigit(): - serv = int(token.value) - else: - if protocol != _proto_udp and protocol != _proto_tcp: - raise NotImplementedError("protocol must be TCP or UDP") - if protocol == _proto_udp: - protocol_text = "udp" - else: - protocol_text = "tcp" - serv = socket.getservbyname(token.value, protocol_text) - i = serv // 8 - l = len(bitmap) - if l < i + 1: - for j in xrange(l, i + 1): - bitmap.append(0) - bitmap[i] = bitmap[i] | (0x80 >> (serv % 8)) - bitmap = dns.rdata._truncate_bitmap(bitmap) - return cls(rdclass, rdtype, address, protocol, bitmap) - - def to_wire(self, file, 
compress=None, origin=None): - file.write(dns.ipv4.inet_aton(self.address)) - protocol = struct.pack('!B', self.protocol) - file.write(protocol) - file.write(self.bitmap) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - address = dns.ipv4.inet_ntoa(wire[current: current + 4]) - protocol, = struct.unpack('!B', wire[current + 4: current + 5]) - current += 5 - rdlen -= 5 - bitmap = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, address, protocol, bitmap) diff --git a/lib/dns/rdtypes/IN/__init__.py b/lib/dns/rdtypes/IN/__init__.py deleted file mode 100644 index d7e69c9f..00000000 --- a/lib/dns/rdtypes/IN/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""Class IN rdata type classes.""" - -__all__ = [ - 'A', - 'AAAA', - 'APL', - 'DHCID', - 'IPSECKEY', - 'KX', - 'NAPTR', - 'NSAP', - 'NSAP_PTR', - 'PX', - 'SRV', - 'WKS', -] diff --git a/lib/dns/rdtypes/__init__.py b/lib/dns/rdtypes/__init__.py deleted file mode 100644 index 1ac137f1..00000000 --- a/lib/dns/rdtypes/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS rdata type classes""" - -__all__ = [ - 'ANY', - 'IN', - 'CH', - 'euibase', - 'mxbase', - 'nsbase', -] diff --git a/lib/dns/rdtypes/dnskeybase.py b/lib/dns/rdtypes/dnskeybase.py deleted file mode 100644 index 3e7e87ef..00000000 --- a/lib/dns/rdtypes/dnskeybase.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import base64 -import struct - -import dns.exception -import dns.dnssec -import dns.rdata - -# wildcard import -__all__ = ["SEP", "REVOKE", "ZONE", - "flags_to_text_set", "flags_from_text_set"] - -# flag constants -SEP = 0x0001 -REVOKE = 0x0080 -ZONE = 0x0100 - -_flag_by_text = { - 'SEP': SEP, - 'REVOKE': REVOKE, - 'ZONE': ZONE -} - -# We construct the inverse mapping programmatically to ensure that we -# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that -# would cause the mapping not to be true inverse. 
-_flag_by_value = {y: x for x, y in _flag_by_text.items()} - - -def flags_to_text_set(flags): - """Convert a DNSKEY flags value to set texts - @rtype: set([string])""" - - flags_set = set() - mask = 0x1 - while mask <= 0x8000: - if flags & mask: - text = _flag_by_value.get(mask) - if not text: - text = hex(mask) - flags_set.add(text) - mask <<= 1 - return flags_set - - -def flags_from_text_set(texts_set): - """Convert set of DNSKEY flag mnemonic texts to DNSKEY flag value - @rtype: int""" - - flags = 0 - for text in texts_set: - try: - flags += _flag_by_text[text] - except KeyError: - raise NotImplementedError( - "DNSKEY flag '%s' is not supported" % text) - return flags - - -class DNSKEYBase(dns.rdata.Rdata): - - """Base class for rdata that is like a DNSKEY record - - @ivar flags: the key flags - @type flags: int - @ivar protocol: the protocol for which this key may be used - @type protocol: int - @ivar algorithm: the algorithm used for the key - @type algorithm: int - @ivar key: the public key - @type key: string""" - - __slots__ = ['flags', 'protocol', 'algorithm', 'key'] - - def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key): - super(DNSKEYBase, self).__init__(rdclass, rdtype) - self.flags = flags - self.protocol = protocol - self.algorithm = algorithm - self.key = key - - def to_text(self, origin=None, relativize=True, **kw): - return '%d %d %d %s' % (self.flags, self.protocol, self.algorithm, - dns.rdata._base64ify(self.key)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - flags = tok.get_uint16() - protocol = tok.get_uint8() - algorithm = dns.dnssec.algorithm_from_text(tok.get_string()) - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise dns.exception.SyntaxError - chunks.append(t.value.encode()) - b64 = b''.join(chunks) - key = base64.b64decode(b64) - return cls(rdclass, rdtype, flags, protocol, algorithm, key) - - def 
to_wire(self, file, compress=None, origin=None): - header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm) - file.write(header) - file.write(self.key) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - if rdlen < 4: - raise dns.exception.FormError - header = struct.unpack('!HBB', wire[current: current + 4]) - current += 4 - rdlen -= 4 - key = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, header[0], header[1], header[2], - key) - - def flags_to_text_set(self): - """Convert a DNSKEY flags value to set texts - @rtype: set([string])""" - return flags_to_text_set(self.flags) diff --git a/lib/dns/rdtypes/dnskeybase.pyi b/lib/dns/rdtypes/dnskeybase.pyi deleted file mode 100644 index e102a698..00000000 --- a/lib/dns/rdtypes/dnskeybase.pyi +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Set, Any - -SEP : int -REVOKE : int -ZONE : int - -def flags_to_text_set(flags : int) -> Set[str]: - ... - -def flags_from_text_set(texts_set) -> int: - ... - -from .. import rdata - -class DNSKEYBase(rdata.Rdata): - def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key): - self.flags : int - self.protocol : int - self.key : str - self.algorithm : int - - def to_text(self, origin : Any = None, relativize=True, **kw : Any): - ... - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - ... - - def to_wire(self, file, compress=None, origin=None): - ... - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - ... - - def flags_to_text_set(self) -> Set[str]: - ... diff --git a/lib/dns/rdtypes/dsbase.py b/lib/dns/rdtypes/dsbase.py deleted file mode 100644 index 26ae9d5c..00000000 --- a/lib/dns/rdtypes/dsbase.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2010, 2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -import struct -import binascii - -import dns.rdata -import dns.rdatatype - - -class DSBase(dns.rdata.Rdata): - - """Base class for rdata that is like a DS record - - @ivar key_tag: the key tag - @type key_tag: int - @ivar algorithm: the algorithm - @type algorithm: int - @ivar digest_type: the digest type - @type digest_type: int - @ivar digest: the digest - @type digest: int - @see: draft-ietf-dnsext-delegation-signer-14.txt""" - - __slots__ = ['key_tag', 'algorithm', 'digest_type', 'digest'] - - def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type, - digest): - super(DSBase, self).__init__(rdclass, rdtype) - self.key_tag = key_tag - self.algorithm = algorithm - self.digest_type = digest_type - self.digest = digest - - def to_text(self, origin=None, relativize=True, **kw): - return '%d %d %d %s' % (self.key_tag, self.algorithm, - self.digest_type, - dns.rdata._hexify(self.digest, - chunksize=128)) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - key_tag = tok.get_uint16() - algorithm = tok.get_uint8() - digest_type = tok.get_uint8() - chunks = [] - while 1: - t = tok.get().unescape() - if t.is_eol_or_eof(): - break - if not t.is_identifier(): - raise 
dns.exception.SyntaxError - chunks.append(t.value.encode()) - digest = b''.join(chunks) - digest = binascii.unhexlify(digest) - return cls(rdclass, rdtype, key_tag, algorithm, digest_type, - digest) - - def to_wire(self, file, compress=None, origin=None): - header = struct.pack("!HBB", self.key_tag, self.algorithm, - self.digest_type) - file.write(header) - file.write(self.digest) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - header = struct.unpack("!HBB", wire[current: current + 4]) - current += 4 - rdlen -= 4 - digest = wire[current: current + rdlen].unwrap() - return cls(rdclass, rdtype, header[0], header[1], header[2], digest) diff --git a/lib/dns/rdtypes/euibase.py b/lib/dns/rdtypes/euibase.py deleted file mode 100644 index cc5fdaa6..00000000 --- a/lib/dns/rdtypes/euibase.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2015 Red Hat, Inc. -# Author: Petr Spacek <pspacek@redhat.com> -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import binascii - -import dns.rdata -from dns._compat import xrange - - -class EUIBase(dns.rdata.Rdata): - - """EUIxx record - - @ivar fingerprint: xx-bit Extended Unique Identifier (EUI-xx) - @type fingerprint: string - @see: rfc7043.txt""" - - __slots__ = ['eui'] - # define these in subclasses - # byte_len = 6 # 0123456789ab (in hex) - # text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab - - def __init__(self, rdclass, rdtype, eui): - super(EUIBase, self).__init__(rdclass, rdtype) - if len(eui) != self.byte_len: - raise dns.exception.FormError('EUI%s rdata has to have %s bytes' - % (self.byte_len * 8, self.byte_len)) - self.eui = eui - - def to_text(self, origin=None, relativize=True, **kw): - return dns.rdata._hexify(self.eui, chunksize=2).replace(' ', '-') - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - text = tok.get_string() - tok.get_eol() - if len(text) != cls.text_len: - raise dns.exception.SyntaxError( - 'Input text must have %s characters' % cls.text_len) - expected_dash_idxs = xrange(2, cls.byte_len * 3 - 1, 3) - for i in expected_dash_idxs: - if text[i] != '-': - raise dns.exception.SyntaxError('Dash expected at position %s' - % i) - text = text.replace('-', '') - try: - data = binascii.unhexlify(text.encode()) - except (ValueError, TypeError) as ex: - raise dns.exception.SyntaxError('Hex decoding error: %s' % str(ex)) - return cls(rdclass, rdtype, data) - - def to_wire(self, file, compress=None, origin=None): - file.write(self.eui) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - eui = wire[current:current + rdlen].unwrap() - return cls(rdclass, rdtype, eui) diff --git a/lib/dns/rdtypes/mxbase.py b/lib/dns/rdtypes/mxbase.py deleted file mode 100644 index 9a3fa623..00000000 --- a/lib/dns/rdtypes/mxbase.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""MX-like base classes.""" - -from io import BytesIO -import struct - -import dns.exception -import dns.rdata -import dns.name - - -class MXBase(dns.rdata.Rdata): - - """Base class for rdata that is like an MX record. - - @ivar preference: the preference value - @type preference: int - @ivar exchange: the exchange name - @type exchange: dns.name.Name object""" - - __slots__ = ['preference', 'exchange'] - - def __init__(self, rdclass, rdtype, preference, exchange): - super(MXBase, self).__init__(rdclass, rdtype) - self.preference = preference - self.exchange = exchange - - def to_text(self, origin=None, relativize=True, **kw): - exchange = self.exchange.choose_relativity(origin, relativize) - return '%d %s' % (self.preference, exchange) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - preference = tok.get_uint16() - exchange = tok.get_name() - exchange = exchange.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, preference, exchange) - - def to_wire(self, file, compress=None, origin=None): - pref = struct.pack("!H", self.preference) - file.write(pref) - self.exchange.to_wire(file, compress, origin) - - def to_digestable(self, origin=None): - return 
struct.pack("!H", self.preference) + \ - self.exchange.to_digestable(origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (preference, ) = struct.unpack('!H', wire[current: current + 2]) - current += 2 - rdlen -= 2 - (exchange, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused != rdlen: - raise dns.exception.FormError - if origin is not None: - exchange = exchange.relativize(origin) - return cls(rdclass, rdtype, preference, exchange) - - def choose_relativity(self, origin=None, relativize=True): - self.exchange = self.exchange.choose_relativity(origin, relativize) - - -class UncompressedMX(MXBase): - - """Base class for rdata that is like an MX record, but whose name - is not compressed when converted to DNS wire format, and whose - digestable form is not downcased.""" - - def to_wire(self, file, compress=None, origin=None): - super(UncompressedMX, self).to_wire(file, None, origin) - - def to_digestable(self, origin=None): - f = BytesIO() - self.to_wire(f, None, origin) - return f.getvalue() - - -class UncompressedDowncasingMX(MXBase): - - """Base class for rdata that is like an MX record, but whose name - is not compressed when convert to DNS wire format.""" - - def to_wire(self, file, compress=None, origin=None): - super(UncompressedDowncasingMX, self).to_wire(file, None, origin) diff --git a/lib/dns/rdtypes/nsbase.py b/lib/dns/rdtypes/nsbase.py deleted file mode 100644 index 97a22326..00000000 --- a/lib/dns/rdtypes/nsbase.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""NS-like base classes.""" - -from io import BytesIO - -import dns.exception -import dns.rdata -import dns.name - - -class NSBase(dns.rdata.Rdata): - - """Base class for rdata that is like an NS record. - - @ivar target: the target name of the rdata - @type target: dns.name.Name object""" - - __slots__ = ['target'] - - def __init__(self, rdclass, rdtype, target): - super(NSBase, self).__init__(rdclass, rdtype) - self.target = target - - def to_text(self, origin=None, relativize=True, **kw): - target = self.target.choose_relativity(origin, relativize) - return str(target) - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - target = tok.get_name() - target = target.choose_relativity(origin, relativize) - tok.get_eol() - return cls(rdclass, rdtype, target) - - def to_wire(self, file, compress=None, origin=None): - self.target.to_wire(file, compress, origin) - - def to_digestable(self, origin=None): - return self.target.to_digestable(origin) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - (target, cused) = dns.name.from_wire(wire[: current + rdlen], - current) - if cused != rdlen: - raise dns.exception.FormError - if origin is not None: - target = target.relativize(origin) - return cls(rdclass, rdtype, target) - - def choose_relativity(self, origin=None, relativize=True): - self.target = self.target.choose_relativity(origin, relativize) - - -class UncompressedNS(NSBase): - - """Base 
class for rdata that is like an NS record, but whose name - is not compressed when convert to DNS wire format, and whose - digestable form is not downcased.""" - - def to_wire(self, file, compress=None, origin=None): - super(UncompressedNS, self).to_wire(file, None, origin) - - def to_digestable(self, origin=None): - f = BytesIO() - self.to_wire(f, None, origin) - return f.getvalue() diff --git a/lib/dns/rdtypes/txtbase.py b/lib/dns/rdtypes/txtbase.py deleted file mode 100644 index 645a57ec..00000000 --- a/lib/dns/rdtypes/txtbase.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""TXT-like base class.""" - -import struct - -import dns.exception -import dns.rdata -import dns.tokenizer -from dns._compat import binary_type, string_types - - -class TXTBase(dns.rdata.Rdata): - - """Base class for rdata that is like a TXT record - - @ivar strings: the strings - @type strings: list of binary - @see: RFC 1035""" - - __slots__ = ['strings'] - - def __init__(self, rdclass, rdtype, strings): - super(TXTBase, self).__init__(rdclass, rdtype) - if isinstance(strings, binary_type) or \ - isinstance(strings, string_types): - strings = [strings] - self.strings = [] - for string in strings: - if isinstance(string, string_types): - string = string.encode() - self.strings.append(string) - - def to_text(self, origin=None, relativize=True, **kw): - txt = '' - prefix = '' - for s in self.strings: - txt += '{}"{}"'.format(prefix, dns.rdata._escapify(s)) - prefix = ' ' - return txt - - @classmethod - def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True): - strings = [] - while 1: - token = tok.get().unescape() - if token.is_eol_or_eof(): - break - if not (token.is_quoted_string() or token.is_identifier()): - raise dns.exception.SyntaxError("expected a string") - if len(token.value) > 255: - raise dns.exception.SyntaxError("string too long") - value = token.value - if isinstance(value, binary_type): - strings.append(value) - else: - strings.append(value.encode()) - if len(strings) == 0: - raise dns.exception.UnexpectedEnd - return cls(rdclass, rdtype, strings) - - def to_wire(self, file, compress=None, origin=None): - for s in self.strings: - l = len(s) - assert l < 256 - file.write(struct.pack('!B', l)) - file.write(s) - - @classmethod - def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None): - strings = [] - while rdlen > 0: - l = wire[current] - current += 1 - rdlen -= 1 - if l > rdlen: - raise dns.exception.FormError - s = wire[current: current + l].unwrap() - current += l - rdlen -= l - strings.append(s) - return 
cls(rdclass, rdtype, strings) diff --git a/lib/dns/rdtypes/txtbase.pyi b/lib/dns/rdtypes/txtbase.pyi deleted file mode 100644 index af447d50..00000000 --- a/lib/dns/rdtypes/txtbase.pyi +++ /dev/null @@ -1,6 +0,0 @@ -from .. import rdata - -class TXTBase(rdata.Rdata): - ... -class TXT(TXTBase): - ... diff --git a/lib/dns/renderer.py b/lib/dns/renderer.py deleted file mode 100644 index d7ef8c7f..00000000 --- a/lib/dns/renderer.py +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Help for building DNS wire format messages""" - -from io import BytesIO -import struct -import random -import time - -import dns.exception -import dns.tsig -from ._compat import long - - -QUESTION = 0 -ANSWER = 1 -AUTHORITY = 2 -ADDITIONAL = 3 - - -class Renderer(object): - """Helper class for building DNS wire-format messages. - - Most applications can use the higher-level L{dns.message.Message} - class and its to_wire() method to generate wire-format messages. - This class is for those applications which need finer control - over the generation of messages. 
- - Typical use:: - - r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512) - r.add_question(qname, qtype, qclass) - r.add_rrset(dns.renderer.ANSWER, rrset_1) - r.add_rrset(dns.renderer.ANSWER, rrset_2) - r.add_rrset(dns.renderer.AUTHORITY, ns_rrset) - r.add_edns(0, 0, 4096) - r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_1) - r.add_rrset(dns.renderer.ADDTIONAL, ad_rrset_2) - r.write_header() - r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac) - wire = r.get_wire() - - output, a BytesIO, where rendering is written - - id: the message id - - flags: the message flags - - max_size: the maximum size of the message - - origin: the origin to use when rendering relative names - - compress: the compression table - - section: an int, the section currently being rendered - - counts: list of the number of RRs in each section - - mac: the MAC of the rendered message (if TSIG was used) - """ - - def __init__(self, id=None, flags=0, max_size=65535, origin=None): - """Initialize a new renderer.""" - - self.output = BytesIO() - if id is None: - self.id = random.randint(0, 65535) - else: - self.id = id - self.flags = flags - self.max_size = max_size - self.origin = origin - self.compress = {} - self.section = QUESTION - self.counts = [0, 0, 0, 0] - self.output.write(b'\x00' * 12) - self.mac = '' - - def _rollback(self, where): - """Truncate the output buffer at offset *where*, and remove any - compression table entries that pointed beyond the truncation - point. - """ - - self.output.seek(where) - self.output.truncate() - keys_to_delete = [] - for k, v in self.compress.items(): - if v >= where: - keys_to_delete.append(k) - for k in keys_to_delete: - del self.compress[k] - - def _set_section(self, section): - """Set the renderer's current section. - - Sections must be rendered order: QUESTION, ANSWER, AUTHORITY, - ADDITIONAL. Sections may be empty. - - Raises dns.exception.FormError if an attempt was made to set - a section value less than the current section. 
- """ - - if self.section != section: - if self.section > section: - raise dns.exception.FormError - self.section = section - - def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN): - """Add a question to the message.""" - - self._set_section(QUESTION) - before = self.output.tell() - qname.to_wire(self.output, self.compress, self.origin) - self.output.write(struct.pack("!HH", rdtype, rdclass)) - after = self.output.tell() - if after >= self.max_size: - self._rollback(before) - raise dns.exception.TooBig - self.counts[QUESTION] += 1 - - def add_rrset(self, section, rrset, **kw): - """Add the rrset to the specified section. - - Any keyword arguments are passed on to the rdataset's to_wire() - routine. - """ - - self._set_section(section) - before = self.output.tell() - n = rrset.to_wire(self.output, self.compress, self.origin, **kw) - after = self.output.tell() - if after >= self.max_size: - self._rollback(before) - raise dns.exception.TooBig - self.counts[section] += n - - def add_rdataset(self, section, name, rdataset, **kw): - """Add the rdataset to the specified section, using the specified - name as the owner name. - - Any keyword arguments are passed on to the rdataset's to_wire() - routine. 
- """ - - self._set_section(section) - before = self.output.tell() - n = rdataset.to_wire(name, self.output, self.compress, self.origin, - **kw) - after = self.output.tell() - if after >= self.max_size: - self._rollback(before) - raise dns.exception.TooBig - self.counts[section] += n - - def add_edns(self, edns, ednsflags, payload, options=None): - """Add an EDNS OPT record to the message.""" - - # make sure the EDNS version in ednsflags agrees with edns - ednsflags &= long(0xFF00FFFF) - ednsflags |= (edns << 16) - self._set_section(ADDITIONAL) - before = self.output.tell() - self.output.write(struct.pack('!BHHIH', 0, dns.rdatatype.OPT, payload, - ednsflags, 0)) - if options is not None: - lstart = self.output.tell() - for opt in options: - stuff = struct.pack("!HH", opt.otype, 0) - self.output.write(stuff) - start = self.output.tell() - opt.to_wire(self.output) - end = self.output.tell() - assert end - start < 65536 - self.output.seek(start - 2) - stuff = struct.pack("!H", end - start) - self.output.write(stuff) - self.output.seek(0, 2) - lend = self.output.tell() - assert lend - lstart < 65536 - self.output.seek(lstart - 2) - stuff = struct.pack("!H", lend - lstart) - self.output.write(stuff) - self.output.seek(0, 2) - after = self.output.tell() - if after >= self.max_size: - self._rollback(before) - raise dns.exception.TooBig - self.counts[ADDITIONAL] += 1 - - def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data, - request_mac, algorithm=dns.tsig.default_algorithm): - """Add a TSIG signature to the message.""" - - s = self.output.getvalue() - (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s, - keyname, - secret, - int(time.time()), - fudge, - id, - tsig_error, - other_data, - request_mac, - algorithm=algorithm) - self._write_tsig(tsig_rdata, keyname) - - def add_multi_tsig(self, ctx, keyname, secret, fudge, id, tsig_error, - other_data, request_mac, - algorithm=dns.tsig.default_algorithm): - """Add a TSIG signature to the message. 
Unlike add_tsig(), this can be - used for a series of consecutive DNS envelopes, e.g. for a zone - transfer over TCP [RFC2845, 4.4]. - - For the first message in the sequence, give ctx=None. For each - subsequent message, give the ctx that was returned from the - add_multi_tsig() call for the previous message.""" - - s = self.output.getvalue() - (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s, - keyname, - secret, - int(time.time()), - fudge, - id, - tsig_error, - other_data, - request_mac, - ctx=ctx, - first=ctx is None, - multi=True, - algorithm=algorithm) - self._write_tsig(tsig_rdata, keyname) - return ctx - - def _write_tsig(self, tsig_rdata, keyname): - self._set_section(ADDITIONAL) - before = self.output.tell() - - keyname.to_wire(self.output, self.compress, self.origin) - self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG, - dns.rdataclass.ANY, 0, 0)) - rdata_start = self.output.tell() - self.output.write(tsig_rdata) - - after = self.output.tell() - assert after - rdata_start < 65536 - if after >= self.max_size: - self._rollback(before) - raise dns.exception.TooBig - - self.output.seek(rdata_start - 2) - self.output.write(struct.pack('!H', after - rdata_start)) - self.counts[ADDITIONAL] += 1 - self.output.seek(10) - self.output.write(struct.pack('!H', self.counts[ADDITIONAL])) - self.output.seek(0, 2) - - def write_header(self): - """Write the DNS message header. - - Writing the DNS message header is done after all sections - have been rendered, but before the optional TSIG signature - is added. 
- """ - - self.output.seek(0) - self.output.write(struct.pack('!HHHHHH', self.id, self.flags, - self.counts[0], self.counts[1], - self.counts[2], self.counts[3])) - self.output.seek(0, 2) - - def get_wire(self): - """Return the wire format message.""" - - return self.output.getvalue() diff --git a/lib/dns/resolver.py b/lib/dns/resolver.py deleted file mode 100644 index 806e5b2b..00000000 --- a/lib/dns/resolver.py +++ /dev/null @@ -1,1383 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS stub resolver.""" - -import socket -import sys -import time -import random - -try: - import threading as _threading -except ImportError: - import dummy_threading as _threading - -import dns.exception -import dns.flags -import dns.ipv4 -import dns.ipv6 -import dns.message -import dns.name -import dns.query -import dns.rcode -import dns.rdataclass -import dns.rdatatype -import dns.reversename -import dns.tsig -from ._compat import xrange, string_types - -if sys.platform == 'win32': - try: - import winreg as _winreg - except ImportError: - import _winreg # pylint: disable=import-error - -class NXDOMAIN(dns.exception.DNSException): - """The DNS query name does not exist.""" - supp_kwargs = {'qnames', 'responses'} - fmt = None # we have our own __str__ implementation - - def _check_kwargs(self, qnames, responses=None): - if not isinstance(qnames, (list, tuple, set)): - raise AttributeError("qnames must be a list, tuple or set") - if len(qnames) == 0: - raise AttributeError("qnames must contain at least one element") - if responses is None: - responses = {} - elif not isinstance(responses, dict): - raise AttributeError("responses must be a dict(qname=response)") - kwargs = dict(qnames=qnames, responses=responses) - return kwargs - - def __str__(self): - if 'qnames' not in self.kwargs: - return super(NXDOMAIN, self).__str__() - qnames = self.kwargs['qnames'] - if len(qnames) > 1: - msg = 'None of DNS query names exist' - else: - msg = 'The DNS query name does not exist' - qnames = ', '.join(map(str, qnames)) - return "{}: {}".format(msg, qnames) - - def canonical_name(self): - if not 'qnames' in self.kwargs: - raise TypeError("parametrized exception required") - IN = dns.rdataclass.IN - CNAME = dns.rdatatype.CNAME - cname = None - for qname in self.kwargs['qnames']: - response = self.kwargs['responses'][qname] - for answer in response.answer: - if answer.rdtype != CNAME or answer.rdclass != IN: - continue - cname = answer.items[0].target.to_text() - if cname is 
not None: - return dns.name.from_text(cname) - return self.kwargs['qnames'][0] - canonical_name = property(canonical_name, doc=( - "Return the unresolved canonical name.")) - - def __add__(self, e_nx): - """Augment by results from another NXDOMAIN exception.""" - qnames0 = list(self.kwargs.get('qnames', [])) - responses0 = dict(self.kwargs.get('responses', {})) - responses1 = e_nx.kwargs.get('responses', {}) - for qname1 in e_nx.kwargs.get('qnames', []): - if qname1 not in qnames0: - qnames0.append(qname1) - if qname1 in responses1: - responses0[qname1] = responses1[qname1] - return NXDOMAIN(qnames=qnames0, responses=responses0) - - def qnames(self): - """All of the names that were tried. - - Returns a list of ``dns.name.Name``. - """ - return self.kwargs['qnames'] - - def responses(self): - """A map from queried names to their NXDOMAIN responses. - - Returns a dict mapping a ``dns.name.Name`` to a - ``dns.message.Message``. - """ - return self.kwargs['responses'] - - def response(self, qname): - """The response for query *qname*. - - Returns a ``dns.message.Message``. - """ - return self.kwargs['responses'][qname] - - -class YXDOMAIN(dns.exception.DNSException): - """The DNS query name is too long after DNAME substitution.""" - -# The definition of the Timeout exception has moved from here to the -# dns.exception module. We keep dns.resolver.Timeout defined for -# backwards compatibility. - -Timeout = dns.exception.Timeout - - -class NoAnswer(dns.exception.DNSException): - """The DNS response does not contain an answer to the question.""" - fmt = 'The DNS response does not contain an answer ' + \ - 'to the question: {query}' - supp_kwargs = {'response'} - - def _fmt_kwargs(self, **kwargs): - return super(NoAnswer, self)._fmt_kwargs( - query=kwargs['response'].question) - - -class NoNameservers(dns.exception.DNSException): - """All nameservers failed to answer the query. 
- - errors: list of servers and respective errors - The type of errors is - [(server IP address, any object convertible to string)]. - Non-empty errors list will add explanatory message () - """ - - msg = "All nameservers failed to answer the query." - fmt = "%s {query}: {errors}" % msg[:-1] - supp_kwargs = {'request', 'errors'} - - def _fmt_kwargs(self, **kwargs): - srv_msgs = [] - for err in kwargs['errors']: - srv_msgs.append('Server {} {} port {} answered {}'.format(err[0], - 'TCP' if err[1] else 'UDP', err[2], err[3])) - return super(NoNameservers, self)._fmt_kwargs( - query=kwargs['request'].question, errors='; '.join(srv_msgs)) - - -class NotAbsolute(dns.exception.DNSException): - """An absolute domain name is required but a relative name was provided.""" - - -class NoRootSOA(dns.exception.DNSException): - """There is no SOA RR at the DNS root name. This should never happen!""" - - -class NoMetaqueries(dns.exception.DNSException): - """DNS metaqueries are not allowed.""" - - -class Answer(object): - """DNS stub resolver answer. - - Instances of this class bundle up the result of a successful DNS - resolution. - - For convenience, the answer object implements much of the sequence - protocol, forwarding to its ``rrset`` attribute. E.g. - ``for a in answer`` is equivalent to ``for a in answer.rrset``. - ``answer[i]`` is equivalent to ``answer.rrset[i]``, and - ``answer[i:j]`` is equivalent to ``answer.rrset[i:j]``. - - Note that CNAMEs or DNAMEs in the response may mean that answer - RRset's name might not be the query name. 
- """ - - def __init__(self, qname, rdtype, rdclass, response, - raise_on_no_answer=True): - self.qname = qname - self.rdtype = rdtype - self.rdclass = rdclass - self.response = response - min_ttl = -1 - rrset = None - for count in xrange(0, 15): - try: - rrset = response.find_rrset(response.answer, qname, - rdclass, rdtype) - if min_ttl == -1 or rrset.ttl < min_ttl: - min_ttl = rrset.ttl - break - except KeyError: - if rdtype != dns.rdatatype.CNAME: - try: - crrset = response.find_rrset(response.answer, - qname, - rdclass, - dns.rdatatype.CNAME) - if min_ttl == -1 or crrset.ttl < min_ttl: - min_ttl = crrset.ttl - for rd in crrset: - qname = rd.target - break - continue - except KeyError: - if raise_on_no_answer: - raise NoAnswer(response=response) - if raise_on_no_answer: - raise NoAnswer(response=response) - if rrset is None and raise_on_no_answer: - raise NoAnswer(response=response) - self.canonical_name = qname - self.rrset = rrset - if rrset is None: - while 1: - # Look for a SOA RR whose owner name is a superdomain - # of qname. 
- try: - srrset = response.find_rrset(response.authority, qname, - rdclass, dns.rdatatype.SOA) - if min_ttl == -1 or srrset.ttl < min_ttl: - min_ttl = srrset.ttl - if srrset[0].minimum < min_ttl: - min_ttl = srrset[0].minimum - break - except KeyError: - try: - qname = qname.parent() - except dns.name.NoParent: - break - self.expiration = time.time() + min_ttl - - def __getattr__(self, attr): - if attr == 'name': - return self.rrset.name - elif attr == 'ttl': - return self.rrset.ttl - elif attr == 'covers': - return self.rrset.covers - elif attr == 'rdclass': - return self.rrset.rdclass - elif attr == 'rdtype': - return self.rrset.rdtype - else: - raise AttributeError(attr) - - def __len__(self): - return self.rrset and len(self.rrset) or 0 - - def __iter__(self): - return self.rrset and iter(self.rrset) or iter(tuple()) - - def __getitem__(self, i): - if self.rrset is None: - raise IndexError - return self.rrset[i] - - def __delitem__(self, i): - if self.rrset is None: - raise IndexError - del self.rrset[i] - - -class Cache(object): - """Simple thread-safe DNS answer cache.""" - - def __init__(self, cleaning_interval=300.0): - """*cleaning_interval*, a ``float`` is the number of seconds between - periodic cleanings. - """ - - self.data = {} - self.cleaning_interval = cleaning_interval - self.next_cleaning = time.time() + self.cleaning_interval - self.lock = _threading.Lock() - - def _maybe_clean(self): - """Clean the cache if it's time to do so.""" - - now = time.time() - if self.next_cleaning <= now: - keys_to_delete = [] - for (k, v) in self.data.items(): - if v.expiration <= now: - keys_to_delete.append(k) - for k in keys_to_delete: - del self.data[k] - now = time.time() - self.next_cleaning = now + self.cleaning_interval - - def get(self, key): - """Get the answer associated with *key*. - - Returns None if no answer is cached for the key. - - *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the - query name, rdtype, and rdclass respectively. 
- - Returns a ``dns.resolver.Answer`` or ``None``. - """ - - try: - self.lock.acquire() - self._maybe_clean() - v = self.data.get(key) - if v is None or v.expiration <= time.time(): - return None - return v - finally: - self.lock.release() - - def put(self, key, value): - """Associate key and value in the cache. - - *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the - query name, rdtype, and rdclass respectively. - - *value*, a ``dns.resolver.Answer``, the answer. - """ - - try: - self.lock.acquire() - self._maybe_clean() - self.data[key] = value - finally: - self.lock.release() - - def flush(self, key=None): - """Flush the cache. - - If *key* is not ``None``, only that item is flushed. Otherwise - the entire cache is flushed. - - *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the - query name, rdtype, and rdclass respectively. - """ - - try: - self.lock.acquire() - if key is not None: - if key in self.data: - del self.data[key] - else: - self.data = {} - self.next_cleaning = time.time() + self.cleaning_interval - finally: - self.lock.release() - - -class LRUCacheNode(object): - """LRUCache node.""" - - def __init__(self, key, value): - self.key = key - self.value = value - self.prev = self - self.next = self - - def link_before(self, node): - self.prev = node.prev - self.next = node - node.prev.next = self - node.prev = self - - def link_after(self, node): - self.prev = node - self.next = node.next - node.next.prev = self - node.next = self - - def unlink(self): - self.next.prev = self.prev - self.prev.next = self.next - - -class LRUCache(object): - """Thread-safe, bounded, least-recently-used DNS answer cache. - - This cache is better than the simple cache (above) if you're - running a web crawler or other process that does a lot of - resolutions. The LRUCache has a maximum number of nodes, and when - it is full, the least-recently used node is removed to make space - for a new one. 
- """ - - def __init__(self, max_size=100000): - """*max_size*, an ``int``, is the maximum number of nodes to cache; - it must be greater than 0. - """ - - self.data = {} - self.set_max_size(max_size) - self.sentinel = LRUCacheNode(None, None) - self.lock = _threading.Lock() - - def set_max_size(self, max_size): - if max_size < 1: - max_size = 1 - self.max_size = max_size - - def get(self, key): - """Get the answer associated with *key*. - - Returns None if no answer is cached for the key. - - *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the - query name, rdtype, and rdclass respectively. - - Returns a ``dns.resolver.Answer`` or ``None``. - """ - - try: - self.lock.acquire() - node = self.data.get(key) - if node is None: - return None - # Unlink because we're either going to move the node to the front - # of the LRU list or we're going to free it. - node.unlink() - if node.value.expiration <= time.time(): - del self.data[node.key] - return None - node.link_after(self.sentinel) - return node.value - finally: - self.lock.release() - - def put(self, key, value): - """Associate key and value in the cache. - - *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the - query name, rdtype, and rdclass respectively. - - *value*, a ``dns.resolver.Answer``, the answer. - """ - - try: - self.lock.acquire() - node = self.data.get(key) - if node is not None: - node.unlink() - del self.data[node.key] - while len(self.data) >= self.max_size: - node = self.sentinel.prev - node.unlink() - del self.data[node.key] - node = LRUCacheNode(key, value) - node.link_after(self.sentinel) - self.data[key] = node - finally: - self.lock.release() - - def flush(self, key=None): - """Flush the cache. - - If *key* is not ``None``, only that item is flushed. Otherwise - the entire cache is flushed. - - *key*, a ``(dns.name.Name, int, int)`` tuple whose values are the - query name, rdtype, and rdclass respectively. 
- """ - - try: - self.lock.acquire() - if key is not None: - node = self.data.get(key) - if node is not None: - node.unlink() - del self.data[node.key] - else: - node = self.sentinel.next - while node != self.sentinel: - next = node.next - node.prev = None - node.next = None - node = next - self.data = {} - finally: - self.lock.release() - - -class Resolver(object): - """DNS stub resolver.""" - - def __init__(self, filename='/etc/resolv.conf', configure=True): - """*filename*, a ``text`` or file object, specifying a file - in standard /etc/resolv.conf format. This parameter is meaningful - only when *configure* is true and the platform is POSIX. - - *configure*, a ``bool``. If True (the default), the resolver - instance is configured in the normal fashion for the operating - system the resolver is running on. (I.e. by reading a - /etc/resolv.conf file on POSIX systems and from the registry - on Windows systems.) - """ - - self.domain = None - self.nameservers = None - self.nameserver_ports = None - self.port = None - self.search = None - self.timeout = None - self.lifetime = None - self.keyring = None - self.keyname = None - self.keyalgorithm = None - self.edns = None - self.ednsflags = None - self.payload = None - self.cache = None - self.flags = None - self.retry_servfail = False - self.rotate = False - - self.reset() - if configure: - if sys.platform == 'win32': - self.read_registry() - elif filename: - self.read_resolv_conf(filename) - - def reset(self): - """Reset all resolver configuration to the defaults.""" - - self.domain = \ - dns.name.Name(dns.name.from_text(socket.gethostname())[1:]) - if len(self.domain) == 0: - self.domain = dns.name.root - self.nameservers = [] - self.nameserver_ports = {} - self.port = 53 - self.search = [] - self.timeout = 2.0 - self.lifetime = 30.0 - self.keyring = None - self.keyname = None - self.keyalgorithm = dns.tsig.default_algorithm - self.edns = -1 - self.ednsflags = 0 - self.payload = 0 - self.cache = None - self.flags = 
None - self.retry_servfail = False - self.rotate = False - - def read_resolv_conf(self, f): - """Process *f* as a file in the /etc/resolv.conf format. If f is - a ``text``, it is used as the name of the file to open; otherwise it - is treated as the file itself.""" - - if isinstance(f, string_types): - try: - f = open(f, 'r') - except IOError: - # /etc/resolv.conf doesn't exist, can't be read, etc. - # We'll just use the default resolver configuration. - self.nameservers = ['127.0.0.1'] - return - want_close = True - else: - want_close = False - try: - for l in f: - if len(l) == 0 or l[0] == '#' or l[0] == ';': - continue - tokens = l.split() - - # Any line containing less than 2 tokens is malformed - if len(tokens) < 2: - continue - - if tokens[0] == 'nameserver': - self.nameservers.append(tokens[1]) - elif tokens[0] == 'domain': - self.domain = dns.name.from_text(tokens[1]) - elif tokens[0] == 'search': - for suffix in tokens[1:]: - self.search.append(dns.name.from_text(suffix)) - elif tokens[0] == 'options': - if 'rotate' in tokens[1:]: - self.rotate = True - finally: - if want_close: - f.close() - if len(self.nameservers) == 0: - self.nameservers.append('127.0.0.1') - - def _determine_split_char(self, entry): - # - # The windows registry irritatingly changes the list element - # delimiter in between ' ' and ',' (and vice-versa) in various - # versions of windows. - # - if entry.find(' ') >= 0: - split_char = ' ' - elif entry.find(',') >= 0: - split_char = ',' - else: - # probably a singleton; treat as a space-separated list. 
- split_char = ' ' - return split_char - - def _config_win32_nameservers(self, nameservers): - # we call str() on nameservers to convert it from unicode to ascii - nameservers = str(nameservers) - split_char = self._determine_split_char(nameservers) - ns_list = nameservers.split(split_char) - for ns in ns_list: - if ns not in self.nameservers: - self.nameservers.append(ns) - - def _config_win32_domain(self, domain): - # we call str() on domain to convert it from unicode to ascii - self.domain = dns.name.from_text(str(domain)) - - def _config_win32_search(self, search): - # we call str() on search to convert it from unicode to ascii - search = str(search) - split_char = self._determine_split_char(search) - search_list = search.split(split_char) - for s in search_list: - if s not in self.search: - self.search.append(dns.name.from_text(s)) - - def _config_win32_fromkey(self, key, always_try_domain): - try: - servers, rtype = _winreg.QueryValueEx(key, 'NameServer') - except WindowsError: # pylint: disable=undefined-variable - servers = None - if servers: - self._config_win32_nameservers(servers) - if servers or always_try_domain: - try: - dom, rtype = _winreg.QueryValueEx(key, 'Domain') - if dom: - self._config_win32_domain(dom) - except WindowsError: # pylint: disable=undefined-variable - pass - else: - try: - servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer') - except WindowsError: # pylint: disable=undefined-variable - servers = None - if servers: - self._config_win32_nameservers(servers) - try: - dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain') - if dom: - self._config_win32_domain(dom) - except WindowsError: # pylint: disable=undefined-variable - pass - try: - search, rtype = _winreg.QueryValueEx(key, 'SearchList') - except WindowsError: # pylint: disable=undefined-variable - search = None - if search: - self._config_win32_search(search) - - def read_registry(self): - """Extract resolver configuration from the Windows registry.""" - - lm = 
_winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) - want_scan = False - try: - try: - # XP, 2000 - tcp_params = _winreg.OpenKey(lm, - r'SYSTEM\CurrentControlSet' - r'\Services\Tcpip\Parameters') - want_scan = True - except EnvironmentError: - # ME - tcp_params = _winreg.OpenKey(lm, - r'SYSTEM\CurrentControlSet' - r'\Services\VxD\MSTCP') - try: - self._config_win32_fromkey(tcp_params, True) - finally: - tcp_params.Close() - if want_scan: - interfaces = _winreg.OpenKey(lm, - r'SYSTEM\CurrentControlSet' - r'\Services\Tcpip\Parameters' - r'\Interfaces') - try: - i = 0 - while True: - try: - guid = _winreg.EnumKey(interfaces, i) - i += 1 - key = _winreg.OpenKey(interfaces, guid) - if not self._win32_is_nic_enabled(lm, guid, key): - continue - try: - self._config_win32_fromkey(key, False) - finally: - key.Close() - except EnvironmentError: - break - finally: - interfaces.Close() - finally: - lm.Close() - - def _win32_is_nic_enabled(self, lm, guid, interface_key): - # Look in the Windows Registry to determine whether the network - # interface corresponding to the given guid is enabled. - # - # (Code contributed by Paul Marks, thanks!) - # - try: - # This hard-coded location seems to be consistent, at least - # from Windows 2000 through Vista. - connection_key = _winreg.OpenKey( - lm, - r'SYSTEM\CurrentControlSet\Control\Network' - r'\{4D36E972-E325-11CE-BFC1-08002BE10318}' - r'\%s\Connection' % guid) - - try: - # The PnpInstanceID points to a key inside Enum - (pnp_id, ttype) = _winreg.QueryValueEx( - connection_key, 'PnpInstanceID') - - if ttype != _winreg.REG_SZ: - raise ValueError - - device_key = _winreg.OpenKey( - lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id) - - try: - # Get ConfigFlags for this device - (flags, ttype) = _winreg.QueryValueEx( - device_key, 'ConfigFlags') - - if ttype != _winreg.REG_DWORD: - raise ValueError - - # Based on experimentation, bit 0x1 indicates that the - # device is disabled. 
- return not flags & 0x1 - - finally: - device_key.Close() - finally: - connection_key.Close() - except (EnvironmentError, ValueError): - # Pre-vista, enabled interfaces seem to have a non-empty - # NTEContextList; this was how dnspython detected enabled - # nics before the code above was contributed. We've retained - # the old method since we don't know if the code above works - # on Windows 95/98/ME. - try: - (nte, ttype) = _winreg.QueryValueEx(interface_key, - 'NTEContextList') - return nte is not None - except WindowsError: # pylint: disable=undefined-variable - return False - - def _compute_timeout(self, start, lifetime=None): - lifetime = self.lifetime if lifetime is None else lifetime - now = time.time() - duration = now - start - if duration < 0: - if duration < -1: - # Time going backwards is bad. Just give up. - raise Timeout(timeout=duration) - else: - # Time went backwards, but only a little. This can - # happen, e.g. under vmware with older linux kernels. - # Pretend it didn't happen. - now = start - if duration >= lifetime: - raise Timeout(timeout=duration) - return min(lifetime - duration, self.timeout) - - def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, - tcp=False, source=None, raise_on_no_answer=True, source_port=0, - lifetime=None): - """Query nameservers to find the answer to the question. - - The *qname*, *rdtype*, and *rdclass* parameters may be objects - of the appropriate type, or strings that can be converted into objects - of the appropriate type. - - *qname*, a ``dns.name.Name`` or ``text``, the query name. - - *rdtype*, an ``int`` or ``text``, the query type. - - *rdclass*, an ``int`` or ``text``, the query class. - - *tcp*, a ``bool``. If ``True``, use TCP to make the query. - - *source*, a ``text`` or ``None``. If not ``None``, bind to this IP - address when making queries. - - *raise_on_no_answer*, a ``bool``. If ``True``, raise - ``dns.resolver.NoAnswer`` if there's no answer to the question. 
- - *source_port*, an ``int``, the port from which to send the message. - - *lifetime*, a ``float``, how long query should run before timing out. - - Raises ``dns.exception.Timeout`` if no answers could be found - in the specified lifetime. - - Raises ``dns.resolver.NXDOMAIN`` if the query name does not exist. - - Raises ``dns.resolver.YXDOMAIN`` if the query name is too long after - DNAME substitution. - - Raises ``dns.resolver.NoAnswer`` if *raise_on_no_answer* is - ``True`` and the query name exists but has no RRset of the - desired type and class. - - Raises ``dns.resolver.NoNameservers`` if no non-broken - nameservers are available to answer the question. - - Returns a ``dns.resolver.Answer`` instance. - """ - - if isinstance(qname, string_types): - qname = dns.name.from_text(qname, None) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if dns.rdatatype.is_metatype(rdtype): - raise NoMetaqueries - if isinstance(rdclass, string_types): - rdclass = dns.rdataclass.from_text(rdclass) - if dns.rdataclass.is_metaclass(rdclass): - raise NoMetaqueries - qnames_to_try = [] - if qname.is_absolute(): - qnames_to_try.append(qname) - else: - if len(qname) > 1: - qnames_to_try.append(qname.concatenate(dns.name.root)) - if self.search: - for suffix in self.search: - qnames_to_try.append(qname.concatenate(suffix)) - else: - qnames_to_try.append(qname.concatenate(self.domain)) - all_nxdomain = True - nxdomain_responses = {} - start = time.time() - _qname = None # make pylint happy - for _qname in qnames_to_try: - if self.cache: - answer = self.cache.get((_qname, rdtype, rdclass)) - if answer is not None: - if answer.rrset is None and raise_on_no_answer: - raise NoAnswer(response=answer.response) - else: - return answer - request = dns.message.make_query(_qname, rdtype, rdclass) - if self.keyname is not None: - request.use_tsig(self.keyring, self.keyname, - algorithm=self.keyalgorithm) - request.use_edns(self.edns, self.ednsflags, 
self.payload) - if self.flags is not None: - request.flags = self.flags - response = None - # - # make a copy of the servers list so we can alter it later. - # - nameservers = self.nameservers[:] - errors = [] - if self.rotate: - random.shuffle(nameservers) - backoff = 0.10 - while response is None: - if len(nameservers) == 0: - raise NoNameservers(request=request, errors=errors) - for nameserver in nameservers[:]: - timeout = self._compute_timeout(start, lifetime) - port = self.nameserver_ports.get(nameserver, self.port) - try: - tcp_attempt = tcp - if tcp: - response = dns.query.tcp(request, nameserver, - timeout, port, - source=source, - source_port=source_port) - else: - response = dns.query.udp(request, nameserver, - timeout, port, - source=source, - source_port=source_port) - if response.flags & dns.flags.TC: - # Response truncated; retry with TCP. - tcp_attempt = True - timeout = self._compute_timeout(start, lifetime) - response = \ - dns.query.tcp(request, nameserver, - timeout, port, - source=source, - source_port=source_port) - except (socket.error, dns.exception.Timeout) as ex: - # - # Communication failure or timeout. Go to the - # next server - # - errors.append((nameserver, tcp_attempt, port, ex, - response)) - response = None - continue - except dns.query.UnexpectedSource as ex: - # - # Who knows? Keep going. - # - errors.append((nameserver, tcp_attempt, port, ex, - response)) - response = None - continue - except dns.exception.FormError as ex: - # - # We don't understand what this server is - # saying. Take it out of the mix and - # continue. - # - nameservers.remove(nameserver) - errors.append((nameserver, tcp_attempt, port, ex, - response)) - response = None - continue - except EOFError as ex: - # - # We're using TCP and they hung up on us. - # Probably they don't support TCP (though - # they're supposed to!). Take it out of the - # mix and continue. 
- # - nameservers.remove(nameserver) - errors.append((nameserver, tcp_attempt, port, ex, - response)) - response = None - continue - rcode = response.rcode() - if rcode == dns.rcode.YXDOMAIN: - ex = YXDOMAIN() - errors.append((nameserver, tcp_attempt, port, ex, - response)) - raise ex - if rcode == dns.rcode.NOERROR or \ - rcode == dns.rcode.NXDOMAIN: - break - # - # We got a response, but we're not happy with the - # rcode in it. Remove the server from the mix if - # the rcode isn't SERVFAIL. - # - if rcode != dns.rcode.SERVFAIL or not self.retry_servfail: - nameservers.remove(nameserver) - errors.append((nameserver, tcp_attempt, port, - dns.rcode.to_text(rcode), response)) - response = None - if response is not None: - break - # - # All nameservers failed! - # - if len(nameservers) > 0: - # - # But we still have servers to try. Sleep a bit - # so we don't pound them! - # - timeout = self._compute_timeout(start, lifetime) - sleep_time = min(timeout, backoff) - backoff *= 2 - time.sleep(sleep_time) - if response.rcode() == dns.rcode.NXDOMAIN: - nxdomain_responses[_qname] = response - continue - all_nxdomain = False - break - if all_nxdomain: - raise NXDOMAIN(qnames=qnames_to_try, responses=nxdomain_responses) - answer = Answer(_qname, rdtype, rdclass, response, - raise_on_no_answer) - if self.cache: - self.cache.put((_qname, rdtype, rdclass), answer) - return answer - - def use_tsig(self, keyring, keyname=None, - algorithm=dns.tsig.default_algorithm): - """Add a TSIG signature to the query. - - See the documentation of the Message class for a complete - description of the keyring dictionary. - - *keyring*, a ``dict``, the TSIG keyring to use. If a - *keyring* is specified but a *keyname* is not, then the key - used will be the first key in the *keyring*. Note that the - order of keys in a dictionary is not defined, so applications - should supply a keyname when a keyring is used, unless they - know the keyring contains only one key. 
- - *keyname*, a ``dns.name.Name`` or ``None``, the name of the TSIG key - to use; defaults to ``None``. The key must be defined in the keyring. - - *algorithm*, a ``dns.name.Name``, the TSIG algorithm to use. - """ - - self.keyring = keyring - if keyname is None: - self.keyname = list(self.keyring.keys())[0] - else: - self.keyname = keyname - self.keyalgorithm = algorithm - - def use_edns(self, edns, ednsflags, payload): - """Configure EDNS behavior. - - *edns*, an ``int``, is the EDNS level to use. Specifying - ``None``, ``False``, or ``-1`` means "do not use EDNS", and in this case - the other parameters are ignored. Specifying ``True`` is - equivalent to specifying 0, i.e. "use EDNS0". - - *ednsflags*, an ``int``, the EDNS flag values. - - *payload*, an ``int``, is the EDNS sender's payload field, which is the - maximum size of UDP datagram the sender can handle. I.e. how big - a response to this message can be. - """ - - if edns is None: - edns = -1 - self.edns = edns - self.ednsflags = ednsflags - self.payload = payload - - def set_flags(self, flags): - """Overrides the default flags with your own. - - *flags*, an ``int``, the message flags to use. - """ - - self.flags = flags - - -#: The default resolver. -default_resolver = None - - -def get_default_resolver(): - """Get the default resolver, initializing it if necessary.""" - if default_resolver is None: - reset_default_resolver() - return default_resolver - - -def reset_default_resolver(): - """Re-initialize default resolver. - - Note that the resolver configuration (i.e. /etc/resolv.conf on UNIX - systems) will be re-read immediately. - """ - - global default_resolver - default_resolver = Resolver() - - -def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, - tcp=False, source=None, raise_on_no_answer=True, - source_port=0, lifetime=None): - """Query nameservers to find the answer to the question. - - This is a convenience function that uses the default resolver - object to make the query. 
- - See ``dns.resolver.Resolver.query`` for more information on the - parameters. - """ - - return get_default_resolver().query(qname, rdtype, rdclass, tcp, source, - raise_on_no_answer, source_port, - lifetime) - - -def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None): - """Find the name of the zone which contains the specified name. - - *name*, an absolute ``dns.name.Name`` or ``text``, the query name. - - *rdclass*, an ``int``, the query class. - - *tcp*, a ``bool``. If ``True``, use TCP to make the query. - - *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use. - If ``None``, the default resolver is used. - - Raises ``dns.resolver.NoRootSOA`` if there is no SOA RR at the DNS - root. (This is only likely to happen if you're using non-default - root servers in your network and they are misconfigured.) - - Returns a ``dns.name.Name``. - """ - - if isinstance(name, string_types): - name = dns.name.from_text(name, dns.name.root) - if resolver is None: - resolver = get_default_resolver() - if not name.is_absolute(): - raise NotAbsolute(name) - while 1: - try: - answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp) - if answer.rrset.name == name: - return name - # otherwise we were CNAMEd or DNAMEd and need to look higher - except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): - pass - try: - name = name.parent() - except dns.name.NoParent: - raise NoRootSOA - -# -# Support for overriding the system resolver for all python code in the -# running process. 
-# - -_protocols_for_socktype = { - socket.SOCK_DGRAM: [socket.SOL_UDP], - socket.SOCK_STREAM: [socket.SOL_TCP], -} - -_resolver = None -_original_getaddrinfo = socket.getaddrinfo -_original_getnameinfo = socket.getnameinfo -_original_getfqdn = socket.getfqdn -_original_gethostbyname = socket.gethostbyname -_original_gethostbyname_ex = socket.gethostbyname_ex -_original_gethostbyaddr = socket.gethostbyaddr - - -def _getaddrinfo(host=None, service=None, family=socket.AF_UNSPEC, socktype=0, - proto=0, flags=0): - if flags & (socket.AI_ADDRCONFIG | socket.AI_V4MAPPED) != 0: - raise NotImplementedError - if host is None and service is None: - raise socket.gaierror(socket.EAI_NONAME) - v6addrs = [] - v4addrs = [] - canonical_name = None - try: - # Is host None or a V6 address literal? - if host is None: - canonical_name = 'localhost' - if flags & socket.AI_PASSIVE != 0: - v6addrs.append('::') - v4addrs.append('0.0.0.0') - else: - v6addrs.append('::1') - v4addrs.append('127.0.0.1') - else: - parts = host.split('%') - if len(parts) == 2: - ahost = parts[0] - else: - ahost = host - addr = dns.ipv6.inet_aton(ahost) - v6addrs.append(host) - canonical_name = host - except Exception: - try: - # Is it a V4 address literal? - addr = dns.ipv4.inet_aton(host) - v4addrs.append(host) - canonical_name = host - except Exception: - if flags & socket.AI_NUMERICHOST == 0: - try: - if family == socket.AF_INET6 or family == socket.AF_UNSPEC: - v6 = _resolver.query(host, dns.rdatatype.AAAA, - raise_on_no_answer=False) - # Note that setting host ensures we query the same name - # for A as we did for AAAA. 
- host = v6.qname - canonical_name = v6.canonical_name.to_text(True) - if v6.rrset is not None: - for rdata in v6.rrset: - v6addrs.append(rdata.address) - if family == socket.AF_INET or family == socket.AF_UNSPEC: - v4 = _resolver.query(host, dns.rdatatype.A, - raise_on_no_answer=False) - host = v4.qname - canonical_name = v4.canonical_name.to_text(True) - if v4.rrset is not None: - for rdata in v4.rrset: - v4addrs.append(rdata.address) - except dns.resolver.NXDOMAIN: - raise socket.gaierror(socket.EAI_NONAME) - except Exception: - raise socket.gaierror(socket.EAI_SYSTEM) - port = None - try: - # Is it a port literal? - if service is None: - port = 0 - else: - port = int(service) - except Exception: - if flags & socket.AI_NUMERICSERV == 0: - try: - port = socket.getservbyname(service) - except Exception: - pass - if port is None: - raise socket.gaierror(socket.EAI_NONAME) - tuples = [] - if socktype == 0: - socktypes = [socket.SOCK_DGRAM, socket.SOCK_STREAM] - else: - socktypes = [socktype] - if flags & socket.AI_CANONNAME != 0: - cname = canonical_name - else: - cname = '' - if family == socket.AF_INET6 or family == socket.AF_UNSPEC: - for addr in v6addrs: - for socktype in socktypes: - for proto in _protocols_for_socktype[socktype]: - tuples.append((socket.AF_INET6, socktype, proto, - cname, (addr, port, 0, 0))) - if family == socket.AF_INET or family == socket.AF_UNSPEC: - for addr in v4addrs: - for socktype in socktypes: - for proto in _protocols_for_socktype[socktype]: - tuples.append((socket.AF_INET, socktype, proto, - cname, (addr, port))) - if len(tuples) == 0: - raise socket.gaierror(socket.EAI_NONAME) - return tuples - - -def _getnameinfo(sockaddr, flags=0): - host = sockaddr[0] - port = sockaddr[1] - if len(sockaddr) == 4: - scope = sockaddr[3] - family = socket.AF_INET6 - else: - scope = None - family = socket.AF_INET - tuples = _getaddrinfo(host, port, family, socket.SOCK_STREAM, - socket.SOL_TCP, 0) - if len(tuples) > 1: - raise socket.error('sockaddr 
resolved to multiple addresses') - addr = tuples[0][4][0] - if flags & socket.NI_DGRAM: - pname = 'udp' - else: - pname = 'tcp' - qname = dns.reversename.from_address(addr) - if flags & socket.NI_NUMERICHOST == 0: - try: - answer = _resolver.query(qname, 'PTR') - hostname = answer.rrset[0].target.to_text(True) - except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): - if flags & socket.NI_NAMEREQD: - raise socket.gaierror(socket.EAI_NONAME) - hostname = addr - if scope is not None: - hostname += '%' + str(scope) - else: - hostname = addr - if scope is not None: - hostname += '%' + str(scope) - if flags & socket.NI_NUMERICSERV: - service = str(port) - else: - service = socket.getservbyport(port, pname) - return (hostname, service) - - -def _getfqdn(name=None): - if name is None: - name = socket.gethostname() - try: - return _getnameinfo(_getaddrinfo(name, 80)[0][4])[0] - except Exception: - return name - - -def _gethostbyname(name): - return _gethostbyname_ex(name)[2][0] - - -def _gethostbyname_ex(name): - aliases = [] - addresses = [] - tuples = _getaddrinfo(name, 0, socket.AF_INET, socket.SOCK_STREAM, - socket.SOL_TCP, socket.AI_CANONNAME) - canonical = tuples[0][3] - for item in tuples: - addresses.append(item[4][0]) - # XXX we just ignore aliases - return (canonical, aliases, addresses) - - -def _gethostbyaddr(ip): - try: - dns.ipv6.inet_aton(ip) - sockaddr = (ip, 80, 0, 0) - family = socket.AF_INET6 - except Exception: - sockaddr = (ip, 80) - family = socket.AF_INET - (name, port) = _getnameinfo(sockaddr, socket.NI_NAMEREQD) - aliases = [] - addresses = [] - tuples = _getaddrinfo(name, 0, family, socket.SOCK_STREAM, socket.SOL_TCP, - socket.AI_CANONNAME) - canonical = tuples[0][3] - for item in tuples: - addresses.append(item[4][0]) - # XXX we just ignore aliases - return (canonical, aliases, addresses) - - -def override_system_resolver(resolver=None): - """Override the system resolver routines in the socket module with - versions which use dnspython's 
resolver. - - This can be useful in testing situations where you want to control - the resolution behavior of python code without having to change - the system's resolver settings (e.g. /etc/resolv.conf). - - The resolver to use may be specified; if it's not, the default - resolver will be used. - - resolver, a ``dns.resolver.Resolver`` or ``None``, the resolver to use. - """ - - if resolver is None: - resolver = get_default_resolver() - global _resolver - _resolver = resolver - socket.getaddrinfo = _getaddrinfo - socket.getnameinfo = _getnameinfo - socket.getfqdn = _getfqdn - socket.gethostbyname = _gethostbyname - socket.gethostbyname_ex = _gethostbyname_ex - socket.gethostbyaddr = _gethostbyaddr - - -def restore_system_resolver(): - """Undo the effects of prior override_system_resolver().""" - - global _resolver - _resolver = None - socket.getaddrinfo = _original_getaddrinfo - socket.getnameinfo = _original_getnameinfo - socket.getfqdn = _original_getfqdn - socket.gethostbyname = _original_gethostbyname - socket.gethostbyname_ex = _original_gethostbyname_ex - socket.gethostbyaddr = _original_gethostbyaddr diff --git a/lib/dns/resolver.pyi b/lib/dns/resolver.pyi deleted file mode 100644 index e839ec21..00000000 --- a/lib/dns/resolver.pyi +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Union, Optional, List -from . import exception, rdataclass, name, rdatatype - -import socket -_gethostbyname = socket.gethostbyname -class NXDOMAIN(exception.DNSException): - ... -def query(qname : str, rdtype : Union[int,str] = 0, rdclass : Union[int,str] = 0, - tcp=False, source=None, raise_on_no_answer=True, - source_port=0): - ... -class LRUCache: - def __init__(self, max_size=1000): - ... - def get(self, key): - ... - def put(self, key, val): - ... -class Answer: - def __init__(self, qname, rdtype, rdclass, response, - raise_on_no_answer=True): - ... -def zone_for_name(name, rdclass : int = rdataclass.IN, tcp=False, resolver : Optional[Resolver] = None): - ... 
- -class Resolver: - def __init__(self, configure): - self.nameservers : List[str] - def query(self, qname : str, rdtype : Union[int,str] = rdatatype.A, rdclass : Union[int,str] = rdataclass.IN, - tcp : bool = False, source : Optional[str] = None, raise_on_no_answer=True, source_port : int = 0): - ... diff --git a/lib/dns/reversename.py b/lib/dns/reversename.py deleted file mode 100644 index 8f095fa9..00000000 --- a/lib/dns/reversename.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2006-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS Reverse Map Names.""" - -import binascii - -import dns.name -import dns.ipv6 -import dns.ipv4 - -from dns._compat import PY3 - -ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.') -ipv6_reverse_domain = dns.name.from_text('ip6.arpa.') - - -def from_address(text): - """Convert an IPv4 or IPv6 address in textual form into a Name object whose - value is the reverse-map domain name of the address. - - *text*, a ``text``, is an IPv4 or IPv6 address in textual form - (e.g. '127.0.0.1', '::1') - - Raises ``dns.exception.SyntaxError`` if the address is badly formed. - - Returns a ``dns.name.Name``. 
- """ - - try: - v6 = dns.ipv6.inet_aton(text) - if dns.ipv6.is_mapped(v6): - if PY3: - parts = ['%d' % byte for byte in v6[12:]] - else: - parts = ['%d' % ord(byte) for byte in v6[12:]] - origin = ipv4_reverse_domain - else: - parts = [x for x in str(binascii.hexlify(v6).decode())] - origin = ipv6_reverse_domain - except Exception: - parts = ['%d' % - byte for byte in bytearray(dns.ipv4.inet_aton(text))] - origin = ipv4_reverse_domain - parts.reverse() - return dns.name.from_text('.'.join(parts), origin=origin) - - -def to_address(name): - """Convert a reverse map domain name into textual address form. - - *name*, a ``dns.name.Name``, an IPv4 or IPv6 address in reverse-map name - form. - - Raises ``dns.exception.SyntaxError`` if the name does not have a - reverse-map form. - - Returns a ``text``. - """ - - if name.is_subdomain(ipv4_reverse_domain): - name = name.relativize(ipv4_reverse_domain) - labels = list(name.labels) - labels.reverse() - text = b'.'.join(labels) - # run through inet_aton() to check syntax and make pretty. - return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text)) - elif name.is_subdomain(ipv6_reverse_domain): - name = name.relativize(ipv6_reverse_domain) - labels = list(name.labels) - labels.reverse() - parts = [] - i = 0 - l = len(labels) - while i < l: - parts.append(b''.join(labels[i:i + 4])) - i += 4 - text = b':'.join(parts) - # run through inet_aton() to check syntax and make pretty. - return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text)) - else: - raise dns.exception.SyntaxError('unknown reverse-map address family') diff --git a/lib/dns/reversename.pyi b/lib/dns/reversename.pyi deleted file mode 100644 index 97f072ea..00000000 --- a/lib/dns/reversename.pyi +++ /dev/null @@ -1,6 +0,0 @@ -from . import name -def from_address(text : str) -> name.Name: - ... - -def to_address(name : name.Name) -> str: - ... 
diff --git a/lib/dns/rrset.py b/lib/dns/rrset.py deleted file mode 100644 index a53ec324..00000000 --- a/lib/dns/rrset.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS RRsets (an RRset is a named rdataset)""" - - -import dns.name -import dns.rdataset -import dns.rdataclass -import dns.renderer -from ._compat import string_types - - -class RRset(dns.rdataset.Rdataset): - - """A DNS RRset (named rdataset). - - RRset inherits from Rdataset, and RRsets can be treated as - Rdatasets in most cases. There are, however, a few notable - exceptions. RRsets have different to_wire() and to_text() method - arguments, reflecting the fact that RRsets always have an owner - name. 
- """ - - __slots__ = ['name', 'deleting'] - - def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE, - deleting=None): - """Create a new RRset.""" - - super(RRset, self).__init__(rdclass, rdtype, covers) - self.name = name - self.deleting = deleting - - def _clone(self): - obj = super(RRset, self)._clone() - obj.name = self.name - obj.deleting = self.deleting - return obj - - def __repr__(self): - if self.covers == 0: - ctext = '' - else: - ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' - if self.deleting is not None: - dtext = ' delete=' + dns.rdataclass.to_text(self.deleting) - else: - dtext = '' - return '<DNS ' + str(self.name) + ' ' + \ - dns.rdataclass.to_text(self.rdclass) + ' ' + \ - dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>' - - def __str__(self): - return self.to_text() - - def __eq__(self, other): - if not isinstance(other, RRset): - return False - if self.name != other.name: - return False - return super(RRset, self).__eq__(other) - - def match(self, name, rdclass, rdtype, covers, deleting=None): - """Returns ``True`` if this rrset matches the specified class, type, - covers, and deletion state. - """ - - if not super(RRset, self).match(rdclass, rdtype, covers): - return False - if self.name != name or self.deleting != deleting: - return False - return True - - def to_text(self, origin=None, relativize=True, **kw): - """Convert the RRset into DNS master file format. - - See ``dns.name.Name.choose_relativity`` for more information - on how *origin* and *relativize* determine the way names - are emitted. - - Any additional keyword arguments are passed on to the rdata - ``to_text()`` method. - - *origin*, a ``dns.name.Name`` or ``None``, the origin for relative - names. - - *relativize*, a ``bool``. If ``True``, names will be relativized - to *origin*. 
- """ - - return super(RRset, self).to_text(self.name, origin, relativize, - self.deleting, **kw) - - def to_wire(self, file, compress=None, origin=None, **kw): - """Convert the RRset to wire format. - - All keyword arguments are passed to ``dns.rdataset.to_wire()``; see - that function for details. - - Returns an ``int``, the number of records emitted. - """ - - return super(RRset, self).to_wire(self.name, file, compress, origin, - self.deleting, **kw) - - def to_rdataset(self): - """Convert an RRset into an Rdataset. - - Returns a ``dns.rdataset.Rdataset``. - """ - return dns.rdataset.from_rdata_list(self.ttl, list(self)) - - -def from_text_list(name, ttl, rdclass, rdtype, text_rdatas, - idna_codec=None): - """Create an RRset with the specified name, TTL, class, and type, and with - the specified list of rdatas in text format. - - Returns a ``dns.rrset.RRset`` object. - """ - - if isinstance(name, string_types): - name = dns.name.from_text(name, None, idna_codec=idna_codec) - if isinstance(rdclass, string_types): - rdclass = dns.rdataclass.from_text(rdclass) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - r = RRset(name, rdclass, rdtype) - r.update_ttl(ttl) - for t in text_rdatas: - rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) - r.add(rd) - return r - - -def from_text(name, ttl, rdclass, rdtype, *text_rdatas): - """Create an RRset with the specified name, TTL, class, and type and with - the specified rdatas in text format. - - Returns a ``dns.rrset.RRset`` object. - """ - - return from_text_list(name, ttl, rdclass, rdtype, text_rdatas) - - -def from_rdata_list(name, ttl, rdatas, idna_codec=None): - """Create an RRset with the specified name and TTL, and with - the specified list of rdata objects. - - Returns a ``dns.rrset.RRset`` object. 
- """ - - if isinstance(name, string_types): - name = dns.name.from_text(name, None, idna_codec=idna_codec) - - if len(rdatas) == 0: - raise ValueError("rdata list must not be empty") - r = None - for rd in rdatas: - if r is None: - r = RRset(name, rd.rdclass, rd.rdtype) - r.update_ttl(ttl) - r.add(rd) - return r - - -def from_rdata(name, ttl, *rdatas): - """Create an RRset with the specified name and TTL, and with - the specified rdata objects. - - Returns a ``dns.rrset.RRset`` object. - """ - - return from_rdata_list(name, ttl, rdatas) diff --git a/lib/dns/rrset.pyi b/lib/dns/rrset.pyi deleted file mode 100644 index 0a81a2a0..00000000 --- a/lib/dns/rrset.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from typing import List, Optional -from . import rdataset, rdatatype - -class RRset(rdataset.Rdataset): - def __init__(self, name, rdclass : int , rdtype : int, covers=rdatatype.NONE, - deleting : Optional[int] =None) -> None: - self.name = name - self.deleting = deleting -def from_text(name : str, ttl : int, rdclass : str, rdtype : str, *text_rdatas : str): - ... diff --git a/lib/dns/set.py b/lib/dns/set.py deleted file mode 100644 index 81329bf4..00000000 --- a/lib/dns/set.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -class Set(object): - - """A simple set class. - - This class was originally used to deal with sets being missing in - ancient versions of python, but dnspython will continue to use it - as these sets are based on lists and are thus indexable, and this - ability is widely used in dnspython applications. - """ - - __slots__ = ['items'] - - def __init__(self, items=None): - """Initialize the set. - - *items*, an iterable or ``None``, the initial set of items. - """ - - self.items = [] - if items is not None: - for item in items: - self.add(item) - - def __repr__(self): - return "dns.simpleset.Set(%s)" % repr(self.items) - - def add(self, item): - """Add an item to the set. - """ - - if item not in self.items: - self.items.append(item) - - def remove(self, item): - """Remove an item from the set. - """ - - self.items.remove(item) - - def discard(self, item): - """Remove an item from the set if present. - """ - - try: - self.items.remove(item) - except ValueError: - pass - - def _clone(self): - """Make a (shallow) copy of the set. - - There is a 'clone protocol' that subclasses of this class - should use. To make a copy, first call your super's _clone() - method, and use the object returned as the new instance. Then - make shallow copies of the attributes defined in the subclass. - - This protocol allows us to write the set algorithms that - return new instances (e.g. union) once, and keep using them in - subclasses. - """ - - cls = self.__class__ - obj = cls.__new__(cls) - obj.items = list(self.items) - return obj - - def __copy__(self): - """Make a (shallow) copy of the set. 
- """ - - return self._clone() - - def copy(self): - """Make a (shallow) copy of the set. - """ - - return self._clone() - - def union_update(self, other): - """Update the set, adding any elements from other which are not - already in the set. - """ - - if not isinstance(other, Set): - raise ValueError('other must be a Set instance') - if self is other: - return - for item in other.items: - self.add(item) - - def intersection_update(self, other): - """Update the set, removing any elements from other which are not - in both sets. - """ - - if not isinstance(other, Set): - raise ValueError('other must be a Set instance') - if self is other: - return - # we make a copy of the list so that we can remove items from - # the list without breaking the iterator. - for item in list(self.items): - if item not in other.items: - self.items.remove(item) - - def difference_update(self, other): - """Update the set, removing any elements from other which are in - the set. - """ - - if not isinstance(other, Set): - raise ValueError('other must be a Set instance') - if self is other: - self.items = [] - else: - for item in other.items: - self.discard(item) - - def union(self, other): - """Return a new set which is the union of ``self`` and ``other``. - - Returns the same Set type as this set. - """ - - obj = self._clone() - obj.union_update(other) - return obj - - def intersection(self, other): - """Return a new set which is the intersection of ``self`` and - ``other``. - - Returns the same Set type as this set. - """ - - obj = self._clone() - obj.intersection_update(other) - return obj - - def difference(self, other): - """Return a new set which ``self`` - ``other``, i.e. the items - in ``self`` which are not also in ``other``. - - Returns the same Set type as this set. 
- """ - - obj = self._clone() - obj.difference_update(other) - return obj - - def __or__(self, other): - return self.union(other) - - def __and__(self, other): - return self.intersection(other) - - def __add__(self, other): - return self.union(other) - - def __sub__(self, other): - return self.difference(other) - - def __ior__(self, other): - self.union_update(other) - return self - - def __iand__(self, other): - self.intersection_update(other) - return self - - def __iadd__(self, other): - self.union_update(other) - return self - - def __isub__(self, other): - self.difference_update(other) - return self - - def update(self, other): - """Update the set, adding any elements from other which are not - already in the set. - - *other*, the collection of items with which to update the set, which - may be any iterable type. - """ - - for item in other: - self.add(item) - - def clear(self): - """Make the set empty.""" - self.items = [] - - def __eq__(self, other): - # Yes, this is inefficient but the sets we're dealing with are - # usually quite small, so it shouldn't hurt too much. - for item in self.items: - if item not in other.items: - return False - for item in other.items: - if item not in self.items: - return False - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __len__(self): - return len(self.items) - - def __iter__(self): - return iter(self.items) - - def __getitem__(self, i): - return self.items[i] - - def __delitem__(self, i): - del self.items[i] - - def issubset(self, other): - """Is this set a subset of *other*? - - Returns a ``bool``. - """ - - if not isinstance(other, Set): - raise ValueError('other must be a Set instance') - for item in self.items: - if item not in other.items: - return False - return True - - def issuperset(self, other): - """Is this set a superset of *other*? - - Returns a ``bool``. 
- """ - - if not isinstance(other, Set): - raise ValueError('other must be a Set instance') - for item in other.items: - if item not in self.items: - return False - return True diff --git a/lib/dns/tokenizer.py b/lib/dns/tokenizer.py deleted file mode 100644 index 880b71ce..00000000 --- a/lib/dns/tokenizer.py +++ /dev/null @@ -1,571 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Tokenize DNS master file format""" - -from io import StringIO -import sys - -import dns.exception -import dns.name -import dns.ttl -from ._compat import long, text_type, binary_type - -_DELIMITERS = { - ' ': True, - '\t': True, - '\n': True, - ';': True, - '(': True, - ')': True, - '"': True} - -_QUOTING_DELIMITERS = {'"': True} - -EOF = 0 -EOL = 1 -WHITESPACE = 2 -IDENTIFIER = 3 -QUOTED_STRING = 4 -COMMENT = 5 -DELIMITER = 6 - - -class UngetBufferFull(dns.exception.DNSException): - """An attempt was made to unget a token when the unget buffer was full.""" - - -class Token(object): - """A DNS master file format token. - - ttype: The token type - value: The token value - has_escape: Does the token value contain escapes? 
- """ - - def __init__(self, ttype, value='', has_escape=False): - """Initialize a token instance.""" - - self.ttype = ttype - self.value = value - self.has_escape = has_escape - - def is_eof(self): - return self.ttype == EOF - - def is_eol(self): - return self.ttype == EOL - - def is_whitespace(self): - return self.ttype == WHITESPACE - - def is_identifier(self): - return self.ttype == IDENTIFIER - - def is_quoted_string(self): - return self.ttype == QUOTED_STRING - - def is_comment(self): - return self.ttype == COMMENT - - def is_delimiter(self): - return self.ttype == DELIMITER - - def is_eol_or_eof(self): - return self.ttype == EOL or self.ttype == EOF - - def __eq__(self, other): - if not isinstance(other, Token): - return False - return (self.ttype == other.ttype and - self.value == other.value) - - def __ne__(self, other): - if not isinstance(other, Token): - return True - return (self.ttype != other.ttype or - self.value != other.value) - - def __str__(self): - return '%d "%s"' % (self.ttype, self.value) - - def unescape(self): - if not self.has_escape: - return self - unescaped = '' - l = len(self.value) - i = 0 - while i < l: - c = self.value[i] - i += 1 - if c == '\\': - if i >= l: - raise dns.exception.UnexpectedEnd - c = self.value[i] - i += 1 - if c.isdigit(): - if i >= l: - raise dns.exception.UnexpectedEnd - c2 = self.value[i] - i += 1 - if i >= l: - raise dns.exception.UnexpectedEnd - c3 = self.value[i] - i += 1 - if not (c2.isdigit() and c3.isdigit()): - raise dns.exception.SyntaxError - c = chr(int(c) * 100 + int(c2) * 10 + int(c3)) - unescaped += c - return Token(self.ttype, unescaped) - - # compatibility for old-style tuple tokens - - def __len__(self): - return 2 - - def __iter__(self): - return iter((self.ttype, self.value)) - - def __getitem__(self, i): - if i == 0: - return self.ttype - elif i == 1: - return self.value - else: - raise IndexError - - -class Tokenizer(object): - """A DNS master file format tokenizer. 
- - A token object is basically a (type, value) tuple. The valid - types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING, - COMMENT, and DELIMITER. - - file: The file to tokenize - - ungotten_char: The most recently ungotten character, or None. - - ungotten_token: The most recently ungotten token, or None. - - multiline: The current multiline level. This value is increased - by one every time a '(' delimiter is read, and decreased by one every time - a ')' delimiter is read. - - quoting: This variable is true if the tokenizer is currently - reading a quoted string. - - eof: This variable is true if the tokenizer has encountered EOF. - - delimiters: The current delimiter dictionary. - - line_number: The current line number - - filename: A filename that will be returned by the where() method. - """ - - def __init__(self, f=sys.stdin, filename=None): - """Initialize a tokenizer instance. - - f: The file to tokenize. The default is sys.stdin. - This parameter may also be a string, in which case the tokenizer - will take its input from the contents of the string. - - filename: the name of the filename that the where() method - will return. - """ - - if isinstance(f, text_type): - f = StringIO(f) - if filename is None: - filename = '<string>' - elif isinstance(f, binary_type): - f = StringIO(f.decode()) - if filename is None: - filename = '<string>' - else: - if filename is None: - if f is sys.stdin: - filename = '<stdin>' - else: - filename = '<file>' - self.file = f - self.ungotten_char = None - self.ungotten_token = None - self.multiline = 0 - self.quoting = False - self.eof = False - self.delimiters = _DELIMITERS - self.line_number = 1 - self.filename = filename - - def _get_char(self): - """Read a character from input. 
- """ - - if self.ungotten_char is None: - if self.eof: - c = '' - else: - c = self.file.read(1) - if c == '': - self.eof = True - elif c == '\n': - self.line_number += 1 - else: - c = self.ungotten_char - self.ungotten_char = None - return c - - def where(self): - """Return the current location in the input. - - Returns a (string, int) tuple. The first item is the filename of - the input, the second is the current line number. - """ - - return (self.filename, self.line_number) - - def _unget_char(self, c): - """Unget a character. - - The unget buffer for characters is only one character large; it is - an error to try to unget a character when the unget buffer is not - empty. - - c: the character to unget - raises UngetBufferFull: there is already an ungotten char - """ - - if self.ungotten_char is not None: - raise UngetBufferFull - self.ungotten_char = c - - def skip_whitespace(self): - """Consume input until a non-whitespace character is encountered. - - The non-whitespace character is then ungotten, and the number of - whitespace characters consumed is returned. - - If the tokenizer is in multiline mode, then newlines are whitespace. - - Returns the number of characters skipped. - """ - - skipped = 0 - while True: - c = self._get_char() - if c != ' ' and c != '\t': - if (c != '\n') or not self.multiline: - self._unget_char(c) - return skipped - skipped += 1 - - def get(self, want_leading=False, want_comment=False): - """Get the next token. - - want_leading: If True, return a WHITESPACE token if the - first character read is whitespace. The default is False. - - want_comment: If True, return a COMMENT token if the - first token read is a comment. The default is False. - - Raises dns.exception.UnexpectedEnd: input ended prematurely - - Raises dns.exception.SyntaxError: input was badly formed - - Returns a Token. 
- """ - - if self.ungotten_token is not None: - token = self.ungotten_token - self.ungotten_token = None - if token.is_whitespace(): - if want_leading: - return token - elif token.is_comment(): - if want_comment: - return token - else: - return token - skipped = self.skip_whitespace() - if want_leading and skipped > 0: - return Token(WHITESPACE, ' ') - token = '' - ttype = IDENTIFIER - has_escape = False - while True: - c = self._get_char() - if c == '' or c in self.delimiters: - if c == '' and self.quoting: - raise dns.exception.UnexpectedEnd - if token == '' and ttype != QUOTED_STRING: - if c == '(': - self.multiline += 1 - self.skip_whitespace() - continue - elif c == ')': - if self.multiline <= 0: - raise dns.exception.SyntaxError - self.multiline -= 1 - self.skip_whitespace() - continue - elif c == '"': - if not self.quoting: - self.quoting = True - self.delimiters = _QUOTING_DELIMITERS - ttype = QUOTED_STRING - continue - else: - self.quoting = False - self.delimiters = _DELIMITERS - self.skip_whitespace() - continue - elif c == '\n': - return Token(EOL, '\n') - elif c == ';': - while 1: - c = self._get_char() - if c == '\n' or c == '': - break - token += c - if want_comment: - self._unget_char(c) - return Token(COMMENT, token) - elif c == '': - if self.multiline: - raise dns.exception.SyntaxError( - 'unbalanced parentheses') - return Token(EOF) - elif self.multiline: - self.skip_whitespace() - token = '' - continue - else: - return Token(EOL, '\n') - else: - # This code exists in case we ever want a - # delimiter to be returned. It never produces - # a token currently. 
- token = c - ttype = DELIMITER - else: - self._unget_char(c) - break - elif self.quoting: - if c == '\\': - c = self._get_char() - if c == '': - raise dns.exception.UnexpectedEnd - if c.isdigit(): - c2 = self._get_char() - if c2 == '': - raise dns.exception.UnexpectedEnd - c3 = self._get_char() - if c == '': - raise dns.exception.UnexpectedEnd - if not (c2.isdigit() and c3.isdigit()): - raise dns.exception.SyntaxError - c = chr(int(c) * 100 + int(c2) * 10 + int(c3)) - elif c == '\n': - raise dns.exception.SyntaxError('newline in quoted string') - elif c == '\\': - # - # It's an escape. Put it and the next character into - # the token; it will be checked later for goodness. - # - token += c - has_escape = True - c = self._get_char() - if c == '' or c == '\n': - raise dns.exception.UnexpectedEnd - token += c - if token == '' and ttype != QUOTED_STRING: - if self.multiline: - raise dns.exception.SyntaxError('unbalanced parentheses') - ttype = EOF - return Token(ttype, token, has_escape) - - def unget(self, token): - """Unget a token. - - The unget buffer for tokens is only one token large; it is - an error to try to unget a token when the unget buffer is not - empty. - - token: the token to unget - - Raises UngetBufferFull: there is already an ungotten token - """ - - if self.ungotten_token is not None: - raise UngetBufferFull - self.ungotten_token = token - - def next(self): - """Return the next item in an iteration. - - Returns a Token. - """ - - token = self.get() - if token.is_eof(): - raise StopIteration - return token - - __next__ = next - - def __iter__(self): - return self - - # Helpers - - def get_int(self, base=10): - """Read the next token and interpret it as an integer. - - Raises dns.exception.SyntaxError if not an integer. - - Returns an int. 
- """ - - token = self.get().unescape() - if not token.is_identifier(): - raise dns.exception.SyntaxError('expecting an identifier') - if not token.value.isdigit(): - raise dns.exception.SyntaxError('expecting an integer') - return int(token.value, base) - - def get_uint8(self): - """Read the next token and interpret it as an 8-bit unsigned - integer. - - Raises dns.exception.SyntaxError if not an 8-bit unsigned integer. - - Returns an int. - """ - - value = self.get_int() - if value < 0 or value > 255: - raise dns.exception.SyntaxError( - '%d is not an unsigned 8-bit integer' % value) - return value - - def get_uint16(self, base=10): - """Read the next token and interpret it as a 16-bit unsigned - integer. - - Raises dns.exception.SyntaxError if not a 16-bit unsigned integer. - - Returns an int. - """ - - value = self.get_int(base=base) - if value < 0 or value > 65535: - if base == 8: - raise dns.exception.SyntaxError( - '%o is not an octal unsigned 16-bit integer' % value) - else: - raise dns.exception.SyntaxError( - '%d is not an unsigned 16-bit integer' % value) - return value - - def get_uint32(self): - """Read the next token and interpret it as a 32-bit unsigned - integer. - - Raises dns.exception.SyntaxError if not a 32-bit unsigned integer. - - Returns an int. - """ - - token = self.get().unescape() - if not token.is_identifier(): - raise dns.exception.SyntaxError('expecting an identifier') - if not token.value.isdigit(): - raise dns.exception.SyntaxError('expecting an integer') - value = long(token.value) - if value < 0 or value > long(4294967296): - raise dns.exception.SyntaxError( - '%d is not an unsigned 32-bit integer' % value) - return value - - def get_string(self, origin=None): - """Read the next token and interpret it as a string. - - Raises dns.exception.SyntaxError if not a string. - - Returns a string. 
- """ - - token = self.get().unescape() - if not (token.is_identifier() or token.is_quoted_string()): - raise dns.exception.SyntaxError('expecting a string') - return token.value - - def get_identifier(self, origin=None): - """Read the next token, which should be an identifier. - - Raises dns.exception.SyntaxError if not an identifier. - - Returns a string. - """ - - token = self.get().unescape() - if not token.is_identifier(): - raise dns.exception.SyntaxError('expecting an identifier') - return token.value - - def get_name(self, origin=None): - """Read the next token and interpret it as a DNS name. - - Raises dns.exception.SyntaxError if not a name. - - Returns a dns.name.Name. - """ - - token = self.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError('expecting an identifier') - return dns.name.from_text(token.value, origin) - - def get_eol(self): - """Read the next token and raise an exception if it isn't EOL or - EOF. - - Returns a string. - """ - - token = self.get() - if not token.is_eol_or_eof(): - raise dns.exception.SyntaxError( - 'expected EOL or EOF, got %d "%s"' % (token.ttype, - token.value)) - return token.value - - def get_ttl(self): - """Read the next token and interpret it as a DNS TTL. - - Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an - identifier or badly formed. - - Returns an int. - """ - - token = self.get().unescape() - if not token.is_identifier(): - raise dns.exception.SyntaxError('expecting an identifier') - return dns.ttl.from_text(token.value) diff --git a/lib/dns/tsig.py b/lib/dns/tsig.py deleted file mode 100644 index 3daa3878..00000000 --- a/lib/dns/tsig.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS TSIG support.""" - -import hashlib -import hmac -import struct - -import dns.exception -import dns.rdataclass -import dns.name -from ._compat import long, string_types, text_type - -class BadTime(dns.exception.DNSException): - - """The current time is not within the TSIG's validity time.""" - - -class BadSignature(dns.exception.DNSException): - - """The TSIG signature fails to verify.""" - - -class PeerError(dns.exception.DNSException): - - """Base class for all TSIG errors generated by the remote peer""" - - -class PeerBadKey(PeerError): - - """The peer didn't know the key we used""" - - -class PeerBadSignature(PeerError): - - """The peer didn't like the signature we sent""" - - -class PeerBadTime(PeerError): - - """The peer didn't like the time we sent""" - - -class PeerBadTruncation(PeerError): - - """The peer didn't like amount of truncation in the TSIG we sent""" - -# TSIG Algorithms - -HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT") -HMAC_SHA1 = dns.name.from_text("hmac-sha1") -HMAC_SHA224 = dns.name.from_text("hmac-sha224") -HMAC_SHA256 = dns.name.from_text("hmac-sha256") -HMAC_SHA384 = dns.name.from_text("hmac-sha384") -HMAC_SHA512 = dns.name.from_text("hmac-sha512") - -_hashes = { - HMAC_SHA224: 
hashlib.sha224, - HMAC_SHA256: hashlib.sha256, - HMAC_SHA384: hashlib.sha384, - HMAC_SHA512: hashlib.sha512, - HMAC_SHA1: hashlib.sha1, - HMAC_MD5: hashlib.md5, -} - -default_algorithm = HMAC_MD5 - -BADSIG = 16 -BADKEY = 17 -BADTIME = 18 -BADTRUNC = 22 - - -def sign(wire, keyname, secret, time, fudge, original_id, error, - other_data, request_mac, ctx=None, multi=False, first=True, - algorithm=default_algorithm): - """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata - for the input parameters, the HMAC MAC calculated by applying the - TSIG signature algorithm, and the TSIG digest context. - @rtype: (string, string, hmac.HMAC object) - @raises ValueError: I{other_data} is too long - @raises NotImplementedError: I{algorithm} is not supported - """ - - if isinstance(other_data, text_type): - other_data = other_data.encode() - (algorithm_name, digestmod) = get_algorithm(algorithm) - if first: - ctx = hmac.new(secret, digestmod=digestmod) - ml = len(request_mac) - if ml > 0: - ctx.update(struct.pack('!H', ml)) - ctx.update(request_mac) - id = struct.pack('!H', original_id) - ctx.update(id) - ctx.update(wire[2:]) - if first: - ctx.update(keyname.to_digestable()) - ctx.update(struct.pack('!H', dns.rdataclass.ANY)) - ctx.update(struct.pack('!I', 0)) - long_time = time + long(0) - upper_time = (long_time >> 32) & long(0xffff) - lower_time = long_time & long(0xffffffff) - time_mac = struct.pack('!HIH', upper_time, lower_time, fudge) - pre_mac = algorithm_name + time_mac - ol = len(other_data) - if ol > 65535: - raise ValueError('TSIG Other Data is > 65535 bytes') - post_mac = struct.pack('!HH', error, ol) + other_data - if first: - ctx.update(pre_mac) - ctx.update(post_mac) - else: - ctx.update(time_mac) - mac = ctx.digest() - mpack = struct.pack('!H', len(mac)) - tsig_rdata = pre_mac + mpack + mac + id + post_mac - if multi: - ctx = hmac.new(secret, digestmod=digestmod) - ml = len(mac) - ctx.update(struct.pack('!H', ml)) - ctx.update(mac) - else: - ctx = 
None - return (tsig_rdata, mac, ctx) - - -def hmac_md5(wire, keyname, secret, time, fudge, original_id, error, - other_data, request_mac, ctx=None, multi=False, first=True, - algorithm=default_algorithm): - return sign(wire, keyname, secret, time, fudge, original_id, error, - other_data, request_mac, ctx, multi, first, algorithm) - - -def validate(wire, keyname, secret, now, request_mac, tsig_start, tsig_rdata, - tsig_rdlen, ctx=None, multi=False, first=True): - """Validate the specified TSIG rdata against the other input parameters. - - @raises FormError: The TSIG is badly formed. - @raises BadTime: There is too much time skew between the client and the - server. - @raises BadSignature: The TSIG signature did not validate - @rtype: hmac.HMAC object""" - - (adcount,) = struct.unpack("!H", wire[10:12]) - if adcount == 0: - raise dns.exception.FormError - adcount -= 1 - new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start] - current = tsig_rdata - (aname, used) = dns.name.from_wire(wire, current) - current = current + used - (upper_time, lower_time, fudge, mac_size) = \ - struct.unpack("!HIHH", wire[current:current + 10]) - time = ((upper_time + long(0)) << 32) + (lower_time + long(0)) - current += 10 - mac = wire[current:current + mac_size] - current += mac_size - (original_id, error, other_size) = \ - struct.unpack("!HHH", wire[current:current + 6]) - current += 6 - other_data = wire[current:current + other_size] - current += other_size - if current != tsig_rdata + tsig_rdlen: - raise dns.exception.FormError - if error != 0: - if error == BADSIG: - raise PeerBadSignature - elif error == BADKEY: - raise PeerBadKey - elif error == BADTIME: - raise PeerBadTime - elif error == BADTRUNC: - raise PeerBadTruncation - else: - raise PeerError('unknown TSIG error code %d' % error) - time_low = time - fudge - time_high = time + fudge - if now < time_low or now > time_high: - raise BadTime - (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge, 
- original_id, error, other_data, - request_mac, ctx, multi, first, aname) - if our_mac != mac: - raise BadSignature - return ctx - - -def get_algorithm(algorithm): - """Returns the wire format string and the hash module to use for the - specified TSIG algorithm - - @rtype: (string, hash constructor) - @raises NotImplementedError: I{algorithm} is not supported - """ - - if isinstance(algorithm, string_types): - algorithm = dns.name.from_text(algorithm) - - try: - return (algorithm.to_digestable(), _hashes[algorithm]) - except KeyError: - raise NotImplementedError("TSIG algorithm " + str(algorithm) + - " is not supported") - - -def get_algorithm_and_mac(wire, tsig_rdata, tsig_rdlen): - """Return the tsig algorithm for the specified tsig_rdata - @raises FormError: The TSIG is badly formed. - """ - current = tsig_rdata - (aname, used) = dns.name.from_wire(wire, current) - current = current + used - (upper_time, lower_time, fudge, mac_size) = \ - struct.unpack("!HIHH", wire[current:current + 10]) - current += 10 - mac = wire[current:current + mac_size] - current += mac_size - if current > tsig_rdata + tsig_rdlen: - raise dns.exception.FormError - return (aname, mac) diff --git a/lib/dns/tsigkeyring.py b/lib/dns/tsigkeyring.py deleted file mode 100644 index 5e5fe1cb..00000000 --- a/lib/dns/tsigkeyring.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""A place to store TSIG keys.""" - -from dns._compat import maybe_decode, maybe_encode - -import base64 - -import dns.name - - -def from_text(textring): - """Convert a dictionary containing (textual DNS name, base64 secret) pairs - into a binary keyring which has (dns.name.Name, binary secret) pairs. - @rtype: dict""" - - keyring = {} - for keytext in textring: - keyname = dns.name.from_text(keytext) - secret = base64.decodestring(maybe_encode(textring[keytext])) - keyring[keyname] = secret - return keyring - - -def to_text(keyring): - """Convert a dictionary containing (dns.name.Name, binary secret) pairs - into a text keyring which has (textual DNS name, base64 secret) pairs. - @rtype: dict""" - - textring = {} - for keyname in keyring: - keytext = maybe_decode(keyname.to_text()) - secret = maybe_decode(base64.encodestring(keyring[keyname])) - textring[keytext] = secret - return textring diff --git a/lib/dns/tsigkeyring.pyi b/lib/dns/tsigkeyring.pyi deleted file mode 100644 index b5d51e15..00000000 --- a/lib/dns/tsigkeyring.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from typing import Dict -from . import name - -def from_text(textring : Dict[str,str]) -> Dict[name.Name,bytes]: - ... -def to_text(keyring : Dict[name.Name,bytes]) -> Dict[str, str]: - ... diff --git a/lib/dns/ttl.py b/lib/dns/ttl.py deleted file mode 100644 index 4be16bee..00000000 --- a/lib/dns/ttl.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. 
-# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""DNS TTL conversion.""" - -import dns.exception -from ._compat import long - - -class BadTTL(dns.exception.SyntaxError): - """DNS TTL value is not well-formed.""" - - -def from_text(text): - """Convert the text form of a TTL to an integer. - - The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported. - - *text*, a ``text``, the textual TTL. - - Raises ``dns.ttl.BadTTL`` if the TTL is not well-formed. - - Returns an ``int``. 
- """ - - if text.isdigit(): - total = long(text) - else: - if not text[0].isdigit(): - raise BadTTL - total = long(0) - current = long(0) - for c in text: - if c.isdigit(): - current *= 10 - current += long(c) - else: - c = c.lower() - if c == 'w': - total += current * long(604800) - elif c == 'd': - total += current * long(86400) - elif c == 'h': - total += current * long(3600) - elif c == 'm': - total += current * long(60) - elif c == 's': - total += current - else: - raise BadTTL("unknown unit '%s'" % c) - current = 0 - if not current == 0: - raise BadTTL("trailing integer") - if total < long(0) or total > long(2147483647): - raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)") - return total diff --git a/lib/dns/update.py b/lib/dns/update.py deleted file mode 100644 index 96a00d5d..00000000 --- a/lib/dns/update.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS Dynamic Update Support""" - - -import dns.message -import dns.name -import dns.opcode -import dns.rdata -import dns.rdataclass -import dns.rdataset -import dns.tsig -from ._compat import string_types - - -class Update(dns.message.Message): - - def __init__(self, zone, rdclass=dns.rdataclass.IN, keyring=None, - keyname=None, keyalgorithm=dns.tsig.default_algorithm): - """Initialize a new DNS Update object. - - See the documentation of the Message class for a complete - description of the keyring dictionary. - - *zone*, a ``dns.name.Name`` or ``text``, the zone which is being - updated. - - *rdclass*, an ``int`` or ``text``, the class of the zone. - - *keyring*, a ``dict``, the TSIG keyring to use. If a - *keyring* is specified but a *keyname* is not, then the key - used will be the first key in the *keyring*. Note that the - order of keys in a dictionary is not defined, so applications - should supply a keyname when a keyring is used, unless they - know the keyring contains only one key. - - *keyname*, a ``dns.name.Name`` or ``None``, the name of the TSIG key - to use; defaults to ``None``. The key must be defined in the keyring. - - *keyalgorithm*, a ``dns.name.Name``, the TSIG algorithm to use. 
- """ - super(Update, self).__init__() - self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE) - if isinstance(zone, string_types): - zone = dns.name.from_text(zone) - self.origin = zone - if isinstance(rdclass, string_types): - rdclass = dns.rdataclass.from_text(rdclass) - self.zone_rdclass = rdclass - self.find_rrset(self.question, self.origin, rdclass, dns.rdatatype.SOA, - create=True, force_unique=True) - if keyring is not None: - self.use_tsig(keyring, keyname, algorithm=keyalgorithm) - - def _add_rr(self, name, ttl, rd, deleting=None, section=None): - """Add a single RR to the update section.""" - - if section is None: - section = self.authority - covers = rd.covers() - rrset = self.find_rrset(section, name, self.zone_rdclass, rd.rdtype, - covers, deleting, True, True) - rrset.add(rd, ttl) - - def _add(self, replace, section, name, *args): - """Add records. - - *replace* is the replacement mode. If ``False``, - RRs are added to an existing RRset; if ``True``, the RRset - is replaced with the specified contents. The second - argument is the section to add to. The third argument - is always a name. The other arguments can be: - - - rdataset... - - - ttl, rdata... - - - ttl, rdtype, string... 
- """ - - if isinstance(name, string_types): - name = dns.name.from_text(name, None) - if isinstance(args[0], dns.rdataset.Rdataset): - for rds in args: - if replace: - self.delete(name, rds.rdtype) - for rd in rds: - self._add_rr(name, rds.ttl, rd, section=section) - else: - args = list(args) - ttl = int(args.pop(0)) - if isinstance(args[0], dns.rdata.Rdata): - if replace: - self.delete(name, args[0].rdtype) - for rd in args: - self._add_rr(name, ttl, rd, section=section) - else: - rdtype = args.pop(0) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if replace: - self.delete(name, rdtype) - for s in args: - rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s, - self.origin) - self._add_rr(name, ttl, rd, section=section) - - def add(self, name, *args): - """Add records. - - The first argument is always a name. The other - arguments can be: - - - rdataset... - - - ttl, rdata... - - - ttl, rdtype, string... - """ - - self._add(False, self.authority, name, *args) - - def delete(self, name, *args): - """Delete records. - - The first argument is always a name. The other - arguments can be: - - - *empty* - - - rdataset... - - - rdata... - - - rdtype, [string...] 
- """ - - if isinstance(name, string_types): - name = dns.name.from_text(name, None) - if len(args) == 0: - self.find_rrset(self.authority, name, dns.rdataclass.ANY, - dns.rdatatype.ANY, dns.rdatatype.NONE, - dns.rdatatype.ANY, True, True) - elif isinstance(args[0], dns.rdataset.Rdataset): - for rds in args: - for rd in rds: - self._add_rr(name, 0, rd, dns.rdataclass.NONE) - else: - args = list(args) - if isinstance(args[0], dns.rdata.Rdata): - for rd in args: - self._add_rr(name, 0, rd, dns.rdataclass.NONE) - else: - rdtype = args.pop(0) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if len(args) == 0: - self.find_rrset(self.authority, name, - self.zone_rdclass, rdtype, - dns.rdatatype.NONE, - dns.rdataclass.ANY, - True, True) - else: - for s in args: - rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s, - self.origin) - self._add_rr(name, 0, rd, dns.rdataclass.NONE) - - def replace(self, name, *args): - """Replace records. - - The first argument is always a name. The other - arguments can be: - - - rdataset... - - - ttl, rdata... - - - ttl, rdtype, string... - - Note that if you want to replace the entire node, you should do - a delete of the name followed by one or more calls to add. - """ - - self._add(True, self.authority, name, *args) - - def present(self, name, *args): - """Require that an owner name (and optionally an rdata type, - or specific rdataset) exists as a prerequisite to the - execution of the update. - - The first argument is always a name. - The other arguments can be: - - - rdataset... - - - rdata... - - - rdtype, string... 
- """ - - if isinstance(name, string_types): - name = dns.name.from_text(name, None) - if len(args) == 0: - self.find_rrset(self.answer, name, - dns.rdataclass.ANY, dns.rdatatype.ANY, - dns.rdatatype.NONE, None, - True, True) - elif isinstance(args[0], dns.rdataset.Rdataset) or \ - isinstance(args[0], dns.rdata.Rdata) or \ - len(args) > 1: - if not isinstance(args[0], dns.rdataset.Rdataset): - # Add a 0 TTL - args = list(args) - args.insert(0, 0) - self._add(False, self.answer, name, *args) - else: - rdtype = args[0] - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - self.find_rrset(self.answer, name, - dns.rdataclass.ANY, rdtype, - dns.rdatatype.NONE, None, - True, True) - - def absent(self, name, rdtype=None): - """Require that an owner name (and optionally an rdata type) does - not exist as a prerequisite to the execution of the update.""" - - if isinstance(name, string_types): - name = dns.name.from_text(name, None) - if rdtype is None: - self.find_rrset(self.answer, name, - dns.rdataclass.NONE, dns.rdatatype.ANY, - dns.rdatatype.NONE, None, - True, True) - else: - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - self.find_rrset(self.answer, name, - dns.rdataclass.NONE, rdtype, - dns.rdatatype.NONE, None, - True, True) - - def to_wire(self, origin=None, max_size=65535): - """Return a string containing the update in DNS compressed wire - format. - - *origin*, a ``dns.name.Name`` or ``None``, the origin to be - appended to any relative names. If *origin* is ``None``, then - the origin of the ``dns.update.Update`` message object is used - (i.e. the *zone* parameter passed when the Update object was - created). - - *max_size*, an ``int``, the maximum size of the wire format - output; default is 0, which means "the message's request - payload, if nonzero, or 65535". - - Returns a ``binary``. 
- """ - - if origin is None: - origin = self.origin - return super(Update, self).to_wire(origin, max_size) diff --git a/lib/dns/update.pyi b/lib/dns/update.pyi deleted file mode 100644 index eeac0591..00000000 --- a/lib/dns/update.pyi +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Optional,Dict,Union,Any - -from . import message, tsig, rdataclass, name - -class Update(message.Message): - def __init__(self, zone : Union[name.Name, str], rdclass : Union[int,str] = rdataclass.IN, keyring : Optional[Dict[name.Name,bytes]] = None, - keyname : Optional[name.Name] = None, keyalgorithm : Optional[name.Name] = tsig.default_algorithm) -> None: - self.id : int - def add(self, name : Union[str,name.Name], *args : Any): - ... - def delete(self, name, *args : Any): - ... - def replace(self, name : Union[str,name.Name], *args : Any): - ... - def present(self, name : Union[str,name.Name], *args : Any): - ... - def absent(self, name : Union[str,name.Name], rdtype=None): - """Require that an owner name (and optionally an rdata type) does - not exist as a prerequisite to the execution of the update.""" - def to_wire(self, origin : Optional[name.Name] = None, max_size=65535, **kw) -> bytes: - ... diff --git a/lib/dns/version.py b/lib/dns/version.py deleted file mode 100644 index f116904b..00000000 --- a/lib/dns/version.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""dnspython release version information.""" - -#: MAJOR -MAJOR = 1 -#: MINOR -MINOR = 16 -#: MICRO -MICRO = 0 -#: RELEASELEVEL -RELEASELEVEL = 0x0f -#: SERIAL -SERIAL = 0 - -if RELEASELEVEL == 0x0f: - #: version - version = '%d.%d.%d' % (MAJOR, MINOR, MICRO) -elif RELEASELEVEL == 0x00: - version = '%d.%d.%dx%d' % \ - (MAJOR, MINOR, MICRO, SERIAL) -else: - version = '%d.%d.%d%x%d' % \ - (MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL) - -#: hexversion -hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | \ - SERIAL diff --git a/lib/dns/wiredata.py b/lib/dns/wiredata.py deleted file mode 100644 index ea3c1e67..00000000 --- a/lib/dns/wiredata.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2011,2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS Wire Data Helper""" - -import dns.exception -from ._compat import binary_type, string_types, PY2 - -# Figure out what constant python passes for an unspecified slice bound. -# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1 -# but Python uses 2^63 - 1 as the constant. Rather than making pointless -# extra comparisons, duplicating code, or weakening WireData, we just figure -# out what constant Python will use. - - -class _SliceUnspecifiedBound(binary_type): - - def __getitem__(self, key): - return key.stop - - if PY2: - def __getslice__(self, i, j): # pylint: disable=getslice-method - return self.__getitem__(slice(i, j)) - -_unspecified_bound = _SliceUnspecifiedBound()[1:] - - -class WireData(binary_type): - # WireData is a binary type with stricter slicing - - def __getitem__(self, key): - try: - if isinstance(key, slice): - # make sure we are not going outside of valid ranges, - # do stricter control of boundaries than python does - # by default - start = key.start - stop = key.stop - - if PY2: - if stop == _unspecified_bound: - # handle the case where the right bound is unspecified - stop = len(self) - - if start < 0 or stop < 0: - raise dns.exception.FormError - # If it's not an empty slice, access left and right bounds - # to make sure they're valid - if start != stop: - super(WireData, self).__getitem__(start) - super(WireData, self).__getitem__(stop - 1) - else: - for index in (start, stop): - if index is None: - continue - elif abs(index) > len(self): - raise dns.exception.FormError - - return WireData(super(WireData, self).__getitem__( - slice(start, stop))) - return bytearray(self.unwrap())[key] - except IndexError: - raise dns.exception.FormError - - if PY2: - def __getslice__(self, i, j): # pylint: disable=getslice-method - return self.__getitem__(slice(i, j)) - - def __iter__(self): - i = 0 - while 1: - try: - yield self[i] - i += 1 - except dns.exception.FormError: - raise StopIteration - - def unwrap(self): - 
return binary_type(self) - - -def maybe_wrap(wire): - if isinstance(wire, WireData): - return wire - elif isinstance(wire, binary_type): - return WireData(wire) - elif isinstance(wire, string_types): - return WireData(wire.encode()) - raise ValueError("unhandled type %s" % type(wire)) diff --git a/lib/dns/zone.py b/lib/dns/zone.py deleted file mode 100644 index 1e2fe781..00000000 --- a/lib/dns/zone.py +++ /dev/null @@ -1,1127 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""DNS Zones.""" - -from __future__ import generators - -import sys -import re -import os -from io import BytesIO - -import dns.exception -import dns.name -import dns.node -import dns.rdataclass -import dns.rdatatype -import dns.rdata -import dns.rdtypes.ANY.SOA -import dns.rrset -import dns.tokenizer -import dns.ttl -import dns.grange -from ._compat import string_types, text_type, PY3 - - -class BadZone(dns.exception.DNSException): - - """The DNS zone is malformed.""" - - -class NoSOA(BadZone): - - """The DNS zone has no SOA RR at its origin.""" - - -class NoNS(BadZone): - - """The DNS zone has no NS RRset at its origin.""" - - -class UnknownOrigin(BadZone): - - """The DNS zone's origin is unknown.""" - - -class Zone(object): - - """A DNS zone. - - A Zone is a mapping from names to nodes. The zone object may be - treated like a Python dictionary, e.g. zone[name] will retrieve - the node associated with that name. The I{name} may be a - dns.name.Name object, or it may be a string. In the either case, - if the name is relative it is treated as relative to the origin of - the zone. - - @ivar rdclass: The zone's rdata class; the default is class IN. - @type rdclass: int - @ivar origin: The origin of the zone. - @type origin: dns.name.Name object - @ivar nodes: A dictionary mapping the names of nodes in the zone to the - nodes themselves. - @type nodes: dict - @ivar relativize: should names in the zone be relativized? - @type relativize: bool - @cvar node_factory: the factory used to create a new node - @type node_factory: class or callable - """ - - node_factory = dns.node.Node - - __slots__ = ['rdclass', 'origin', 'nodes', 'relativize'] - - def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True): - """Initialize a zone object. - - @param origin: The origin of the zone. - @type origin: dns.name.Name object - @param rdclass: The zone's rdata class; the default is class IN. 
- @type rdclass: int""" - - if origin is not None: - if isinstance(origin, string_types): - origin = dns.name.from_text(origin) - elif not isinstance(origin, dns.name.Name): - raise ValueError("origin parameter must be convertible to a " - "DNS name") - if not origin.is_absolute(): - raise ValueError("origin parameter must be an absolute name") - self.origin = origin - self.rdclass = rdclass - self.nodes = {} - self.relativize = relativize - - def __eq__(self, other): - """Two zones are equal if they have the same origin, class, and - nodes. - @rtype: bool - """ - - if not isinstance(other, Zone): - return False - if self.rdclass != other.rdclass or \ - self.origin != other.origin or \ - self.nodes != other.nodes: - return False - return True - - def __ne__(self, other): - """Are two zones not equal? - @rtype: bool - """ - - return not self.__eq__(other) - - def _validate_name(self, name): - if isinstance(name, string_types): - name = dns.name.from_text(name, None) - elif not isinstance(name, dns.name.Name): - raise KeyError("name parameter must be convertible to a DNS name") - if name.is_absolute(): - if not name.is_subdomain(self.origin): - raise KeyError( - "name parameter must be a subdomain of the zone origin") - if self.relativize: - name = name.relativize(self.origin) - return name - - def __getitem__(self, key): - key = self._validate_name(key) - return self.nodes[key] - - def __setitem__(self, key, value): - key = self._validate_name(key) - self.nodes[key] = value - - def __delitem__(self, key): - key = self._validate_name(key) - del self.nodes[key] - - def __iter__(self): - return self.nodes.__iter__() - - def iterkeys(self): - if PY3: - return self.nodes.keys() # pylint: disable=dict-keys-not-iterating - else: - return self.nodes.iterkeys() # pylint: disable=dict-iter-method - - def keys(self): - return self.nodes.keys() # pylint: disable=dict-keys-not-iterating - - def itervalues(self): - if PY3: - return self.nodes.values() # pylint: 
disable=dict-values-not-iterating - else: - return self.nodes.itervalues() # pylint: disable=dict-iter-method - - def values(self): - return self.nodes.values() # pylint: disable=dict-values-not-iterating - - def items(self): - return self.nodes.items() # pylint: disable=dict-items-not-iterating - - iteritems = items - - def get(self, key): - key = self._validate_name(key) - return self.nodes.get(key) - - def __contains__(self, other): - return other in self.nodes - - def find_node(self, name, create=False): - """Find a node in the zone, possibly creating it. - - @param name: the name of the node to find - @type name: dns.name.Name object or string - @param create: should the node be created if it doesn't exist? - @type create: bool - @raises KeyError: the name is not known and create was not specified. - @rtype: dns.node.Node object - """ - - name = self._validate_name(name) - node = self.nodes.get(name) - if node is None: - if not create: - raise KeyError - node = self.node_factory() - self.nodes[name] = node - return node - - def get_node(self, name, create=False): - """Get a node in the zone, possibly creating it. - - This method is like L{find_node}, except it returns None instead - of raising an exception if the node does not exist and creation - has not been requested. - - @param name: the name of the node to find - @type name: dns.name.Name object or string - @param create: should the node be created if it doesn't exist? - @type create: bool - @rtype: dns.node.Node object or None - """ - - try: - node = self.find_node(name, create) - except KeyError: - node = None - return node - - def delete_node(self, name): - """Delete the specified node if it exists. - - It is not an error if the node does not exist. 
- """ - - name = self._validate_name(name) - if name in self.nodes: - del self.nodes[name] - - def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, - create=False): - """Look for rdata with the specified name and type in the zone, - and return an rdataset encapsulating it. - - The I{name}, I{rdtype}, and I{covers} parameters may be - strings, in which case they will be converted to their proper - type. - - The rdataset returned is not a copy; changes to it will change - the zone. - - KeyError is raised if the name or type are not found. - Use L{get_rdataset} if you want to have None returned instead. - - @param name: the owner name to look for - @type name: DNS.name.Name object or string - @param rdtype: the rdata type desired - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - @param create: should the node and rdataset be created if they do not - exist? - @type create: bool - @raises KeyError: the node or rdata could not be found - @rtype: dns.rdataset.Rdataset object - """ - - name = self._validate_name(name) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if isinstance(covers, string_types): - covers = dns.rdatatype.from_text(covers) - node = self.find_node(name, create) - return node.find_rdataset(self.rdclass, rdtype, covers, create) - - def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE, - create=False): - """Look for rdata with the specified name and type in the zone, - and return an rdataset encapsulating it. - - The I{name}, I{rdtype}, and I{covers} parameters may be - strings, in which case they will be converted to their proper - type. - - The rdataset returned is not a copy; changes to it will change - the zone. - - None is returned if the name or type are not found. - Use L{find_rdataset} if you want to have KeyError raised instead. 
- - @param name: the owner name to look for - @type name: DNS.name.Name object or string - @param rdtype: the rdata type desired - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - @param create: should the node and rdataset be created if they do not - exist? - @type create: bool - @rtype: dns.rdataset.Rdataset object or None - """ - - try: - rdataset = self.find_rdataset(name, rdtype, covers, create) - except KeyError: - rdataset = None - return rdataset - - def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE): - """Delete the rdataset matching I{rdtype} and I{covers}, if it - exists at the node specified by I{name}. - - The I{name}, I{rdtype}, and I{covers} parameters may be - strings, in which case they will be converted to their proper - type. - - It is not an error if the node does not exist, or if there is no - matching rdataset at the node. - - If the node has no rdatasets after the deletion, it will itself - be deleted. - - @param name: the owner name to look for - @type name: DNS.name.Name object or string - @param rdtype: the rdata type desired - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - """ - - name = self._validate_name(name) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if isinstance(covers, string_types): - covers = dns.rdatatype.from_text(covers) - node = self.get_node(name) - if node is not None: - node.delete_rdataset(self.rdclass, rdtype, covers) - if len(node) == 0: - self.delete_node(name) - - def replace_rdataset(self, name, replacement): - """Replace an rdataset at name. - - It is not an error if there is no rdataset matching I{replacement}. - - Ownership of the I{replacement} object is transferred to the zone; - in other words, this method does not store a copy of I{replacement} - at the node, it stores I{replacement} itself. 
- - If the I{name} node does not exist, it is created. - - @param name: the owner name - @type name: DNS.name.Name object or string - @param replacement: the replacement rdataset - @type replacement: dns.rdataset.Rdataset - """ - - if replacement.rdclass != self.rdclass: - raise ValueError('replacement.rdclass != zone.rdclass') - node = self.find_node(name, True) - node.replace_rdataset(replacement) - - def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE): - """Look for rdata with the specified name and type in the zone, - and return an RRset encapsulating it. - - The I{name}, I{rdtype}, and I{covers} parameters may be - strings, in which case they will be converted to their proper - type. - - This method is less efficient than the similar - L{find_rdataset} because it creates an RRset instead of - returning the matching rdataset. It may be more convenient - for some uses since it returns an object which binds the owner - name to the rdata. - - This method may not be used to create new nodes or rdatasets; - use L{find_rdataset} instead. - - KeyError is raised if the name or type are not found. - Use L{get_rrset} if you want to have None returned instead. 
- - @param name: the owner name to look for - @type name: DNS.name.Name object or string - @param rdtype: the rdata type desired - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - @raises KeyError: the node or rdata could not be found - @rtype: dns.rrset.RRset object - """ - - name = self._validate_name(name) - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if isinstance(covers, string_types): - covers = dns.rdatatype.from_text(covers) - rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers) - rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers) - rrset.update(rdataset) - return rrset - - def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE): - """Look for rdata with the specified name and type in the zone, - and return an RRset encapsulating it. - - The I{name}, I{rdtype}, and I{covers} parameters may be - strings, in which case they will be converted to their proper - type. - - This method is less efficient than the similar L{get_rdataset} - because it creates an RRset instead of returning the matching - rdataset. It may be more convenient for some uses since it - returns an object which binds the owner name to the rdata. - - This method may not be used to create new nodes or rdatasets; - use L{find_rdataset} instead. - - None is returned if the name or type are not found. - Use L{find_rrset} if you want to have KeyError raised instead. 
- - @param name: the owner name to look for - @type name: DNS.name.Name object or string - @param rdtype: the rdata type desired - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - @rtype: dns.rrset.RRset object - """ - - try: - rrset = self.find_rrset(name, rdtype, covers) - except KeyError: - rrset = None - return rrset - - def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY, - covers=dns.rdatatype.NONE): - """Return a generator which yields (name, rdataset) tuples for - all rdatasets in the zone which have the specified I{rdtype} - and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default, - then all rdatasets will be matched. - - @param rdtype: int or string - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - """ - - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if isinstance(covers, string_types): - covers = dns.rdatatype.from_text(covers) - for (name, node) in self.iteritems(): # pylint: disable=dict-iter-method - for rds in node: - if rdtype == dns.rdatatype.ANY or \ - (rds.rdtype == rdtype and rds.covers == covers): - yield (name, rds) - - def iterate_rdatas(self, rdtype=dns.rdatatype.ANY, - covers=dns.rdatatype.NONE): - """Return a generator which yields (name, ttl, rdata) tuples for - all rdatas in the zone which have the specified I{rdtype} - and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default, - then all rdatas will be matched. 
- - @param rdtype: int or string - @type rdtype: int or string - @param covers: the covered type (defaults to None) - @type covers: int or string - """ - - if isinstance(rdtype, string_types): - rdtype = dns.rdatatype.from_text(rdtype) - if isinstance(covers, string_types): - covers = dns.rdatatype.from_text(covers) - for (name, node) in self.iteritems(): # pylint: disable=dict-iter-method - for rds in node: - if rdtype == dns.rdatatype.ANY or \ - (rds.rdtype == rdtype and rds.covers == covers): - for rdata in rds: - yield (name, rds.ttl, rdata) - - def to_file(self, f, sorted=True, relativize=True, nl=None): - """Write a zone to a file. - - @param f: file or string. If I{f} is a string, it is treated - as the name of a file to open. - @param sorted: if True, the file will be written with the - names sorted in DNSSEC order from least to greatest. Otherwise - the names will be written in whatever order they happen to have - in the zone's dictionary. - @param relativize: if True, domain names in the output will be - relativized to the zone's origin (if possible). - @type relativize: bool - @param nl: The end of line string. If not specified, the - output will use the platform's native end-of-line marker (i.e. - LF on POSIX, CRLF on Windows, CR on Macintosh). 
- @type nl: string or None - """ - - if isinstance(f, string_types): - f = open(f, 'wb') - want_close = True - else: - want_close = False - - # must be in this way, f.encoding may contain None, or even attribute - # may not be there - file_enc = getattr(f, 'encoding', None) - if file_enc is None: - file_enc = 'utf-8' - - if nl is None: - nl_b = os.linesep.encode(file_enc) # binary mode, '\n' is not enough - nl = u'\n' - elif isinstance(nl, string_types): - nl_b = nl.encode(file_enc) - else: - nl_b = nl - nl = nl.decode() - - try: - if sorted: - names = list(self.keys()) - names.sort() - else: - names = self.iterkeys() # pylint: disable=dict-iter-method - for n in names: - l = self[n].to_text(n, origin=self.origin, - relativize=relativize) - if isinstance(l, text_type): - l_b = l.encode(file_enc) - else: - l_b = l - l = l.decode() - - try: - f.write(l_b) - f.write(nl_b) - except TypeError: # textual mode - f.write(l) - f.write(nl) - finally: - if want_close: - f.close() - - def to_text(self, sorted=True, relativize=True, nl=None): - """Return a zone's text as though it were written to a file. - - @param sorted: if True, the file will be written with the - names sorted in DNSSEC order from least to greatest. Otherwise - the names will be written in whatever order they happen to have - in the zone's dictionary. - @param relativize: if True, domain names in the output will be - relativized to the zone's origin (if possible). - @type relativize: bool - @param nl: The end of line string. If not specified, the - output will use the platform's native end-of-line marker (i.e. - LF on POSIX, CRLF on Windows, CR on Macintosh). - @type nl: string or None - """ - temp_buffer = BytesIO() - self.to_file(temp_buffer, sorted, relativize, nl) - return_value = temp_buffer.getvalue() - temp_buffer.close() - return return_value - - def check_origin(self): - """Do some simple checking of the zone's origin. 
- - @raises dns.zone.NoSOA: there is no SOA RR - @raises dns.zone.NoNS: there is no NS RRset - @raises KeyError: there is no origin node - """ - if self.relativize: - name = dns.name.empty - else: - name = self.origin - if self.get_rdataset(name, dns.rdatatype.SOA) is None: - raise NoSOA - if self.get_rdataset(name, dns.rdatatype.NS) is None: - raise NoNS - - -class _MasterReader(object): - - """Read a DNS master file - - @ivar tok: The tokenizer - @type tok: dns.tokenizer.Tokenizer object - @ivar last_ttl: The last seen explicit TTL for an RR - @type last_ttl: int - @ivar last_ttl_known: Has last TTL been detected - @type last_ttl_known: bool - @ivar default_ttl: The default TTL from a $TTL directive or SOA RR - @type default_ttl: int - @ivar default_ttl_known: Has default TTL been detected - @type default_ttl_known: bool - @ivar last_name: The last name read - @type last_name: dns.name.Name object - @ivar current_origin: The current origin - @type current_origin: dns.name.Name object - @ivar relativize: should names in the zone be relativized? - @type relativize: bool - @ivar zone: the zone - @type zone: dns.zone.Zone object - @ivar saved_state: saved reader state (used when processing $INCLUDE) - @type saved_state: list of (tokenizer, current_origin, last_name, file, - last_ttl, last_ttl_known, default_ttl, default_ttl_known) tuples. - @ivar current_file: the file object of the $INCLUDed file being parsed - (None if no $INCLUDE is active). - @ivar allow_include: is $INCLUDE allowed? - @type allow_include: bool - @ivar check_origin: should sanity checks of the origin node be done? - The default is True. 
- @type check_origin: bool - """ - - def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone, - allow_include=False, check_origin=True): - if isinstance(origin, string_types): - origin = dns.name.from_text(origin) - self.tok = tok - self.current_origin = origin - self.relativize = relativize - self.last_ttl = 0 - self.last_ttl_known = False - self.default_ttl = 0 - self.default_ttl_known = False - self.last_name = self.current_origin - self.zone = zone_factory(origin, rdclass, relativize=relativize) - self.saved_state = [] - self.current_file = None - self.allow_include = allow_include - self.check_origin = check_origin - - def _eat_line(self): - while 1: - token = self.tok.get() - if token.is_eol_or_eof(): - break - - def _rr_line(self): - """Process one line from a DNS master file.""" - # Name - if self.current_origin is None: - raise UnknownOrigin - token = self.tok.get(want_leading=True) - if not token.is_whitespace(): - self.last_name = dns.name.from_text( - token.value, self.current_origin) - else: - token = self.tok.get() - if token.is_eol_or_eof(): - # treat leading WS followed by EOL/EOF as if they were EOL/EOF. 
- return - self.tok.unget(token) - name = self.last_name - if not name.is_subdomain(self.zone.origin): - self._eat_line() - return - if self.relativize: - name = name.relativize(self.zone.origin) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - # TTL - try: - ttl = dns.ttl.from_text(token.value) - self.last_ttl = ttl - self.last_ttl_known = True - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except dns.ttl.BadTTL: - if not (self.last_ttl_known or self.default_ttl_known): - raise dns.exception.SyntaxError("Missing default TTL value") - if self.default_ttl_known: - ttl = self.default_ttl - else: - ttl = self.last_ttl - # Class - try: - rdclass = dns.rdataclass.from_text(token.value) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except dns.exception.SyntaxError: - raise dns.exception.SyntaxError - except Exception: - rdclass = self.zone.rdclass - if rdclass != self.zone.rdclass: - raise dns.exception.SyntaxError("RR class is not zone's class") - # Type - try: - rdtype = dns.rdatatype.from_text(token.value) - except: - raise dns.exception.SyntaxError( - "unknown rdatatype '%s'" % token.value) - n = self.zone.nodes.get(name) - if n is None: - n = self.zone.node_factory() - self.zone.nodes[name] = n - try: - rd = dns.rdata.from_text(rdclass, rdtype, self.tok, - self.current_origin, False) - except dns.exception.SyntaxError: - # Catch and reraise. - (ty, va) = sys.exc_info()[:2] - raise va - except: - # All exceptions that occur in the processing of rdata - # are treated as syntax errors. This is not strictly - # correct, but it is correct almost all of the time. - # We convert them to syntax errors so that we can emit - # helpful filename:line info. 
- (ty, va) = sys.exc_info()[:2] - raise dns.exception.SyntaxError( - "caught exception {}: {}".format(str(ty), str(va))) - - if not self.default_ttl_known and isinstance(rd, dns.rdtypes.ANY.SOA.SOA): - # The pre-RFC2308 and pre-BIND9 behavior inherits the zone default - # TTL from the SOA minttl if no $TTL statement is present before the - # SOA is parsed. - self.default_ttl = rd.minimum - self.default_ttl_known = True - - rd.choose_relativity(self.zone.origin, self.relativize) - covers = rd.covers() - rds = n.find_rdataset(rdclass, rdtype, covers, True) - rds.add(rd, ttl) - - def _parse_modify(self, side): - # Here we catch everything in '{' '}' in a group so we can replace it - # with ''. - is_generate1 = re.compile("^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$") - is_generate2 = re.compile("^.*\$({(\+|-?)(\d+)}).*$") - is_generate3 = re.compile("^.*\$({(\+|-?)(\d+),(\d+)}).*$") - # Sometimes there are modifiers in the hostname. These come after - # the dollar sign. They are in the form: ${offset[,width[,base]]}. 
- # Make names - g1 = is_generate1.match(side) - if g1: - mod, sign, offset, width, base = g1.groups() - if sign == '': - sign = '+' - g2 = is_generate2.match(side) - if g2: - mod, sign, offset = g2.groups() - if sign == '': - sign = '+' - width = 0 - base = 'd' - g3 = is_generate3.match(side) - if g3: - mod, sign, offset, width = g1.groups() - if sign == '': - sign = '+' - width = g1.groups()[2] - base = 'd' - - if not (g1 or g2 or g3): - mod = '' - sign = '+' - offset = 0 - width = 0 - base = 'd' - - if base != 'd': - raise NotImplementedError() - - return mod, sign, offset, width, base - - def _generate_line(self): - # range lhs [ttl] [class] type rhs [ comment ] - """Process one line containing the GENERATE statement from a DNS - master file.""" - if self.current_origin is None: - raise UnknownOrigin - - token = self.tok.get() - # Range (required) - try: - start, stop, step = dns.grange.from_text(token.value) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except: - raise dns.exception.SyntaxError - - # lhs (required) - try: - lhs = token.value - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except: - raise dns.exception.SyntaxError - - # TTL - try: - ttl = dns.ttl.from_text(token.value) - self.last_ttl = ttl - self.last_ttl_known = True - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except dns.ttl.BadTTL: - if not (self.last_ttl_known or self.default_ttl_known): - raise dns.exception.SyntaxError("Missing default TTL value") - if self.default_ttl_known: - ttl = self.default_ttl - else: - ttl = self.last_ttl - # Class - try: - rdclass = dns.rdataclass.from_text(token.value) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except dns.exception.SyntaxError: - raise dns.exception.SyntaxError - except Exception: - rdclass = self.zone.rdclass - if rdclass != self.zone.rdclass: - raise 
dns.exception.SyntaxError("RR class is not zone's class") - # Type - try: - rdtype = dns.rdatatype.from_text(token.value) - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError - except Exception: - raise dns.exception.SyntaxError("unknown rdatatype '%s'" % - token.value) - - # lhs (required) - try: - rhs = token.value - except: - raise dns.exception.SyntaxError - - lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs) - rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs) - for i in range(start, stop + 1, step): - # +1 because bind is inclusive and python is exclusive - - if lsign == u'+': - lindex = i + int(loffset) - elif lsign == u'-': - lindex = i - int(loffset) - - if rsign == u'-': - rindex = i - int(roffset) - elif rsign == u'+': - rindex = i + int(roffset) - - lzfindex = str(lindex).zfill(int(lwidth)) - rzfindex = str(rindex).zfill(int(rwidth)) - - name = lhs.replace(u'$%s' % (lmod), lzfindex) - rdata = rhs.replace(u'$%s' % (rmod), rzfindex) - - self.last_name = dns.name.from_text(name, self.current_origin) - name = self.last_name - if not name.is_subdomain(self.zone.origin): - self._eat_line() - return - if self.relativize: - name = name.relativize(self.zone.origin) - - n = self.zone.nodes.get(name) - if n is None: - n = self.zone.node_factory() - self.zone.nodes[name] = n - try: - rd = dns.rdata.from_text(rdclass, rdtype, rdata, - self.current_origin, False) - except dns.exception.SyntaxError: - # Catch and reraise. - (ty, va) = sys.exc_info()[:2] - raise va - except: - # All exceptions that occur in the processing of rdata - # are treated as syntax errors. This is not strictly - # correct, but it is correct almost all of the time. - # We convert them to syntax errors so that we can emit - # helpful filename:line info. 
- (ty, va) = sys.exc_info()[:2] - raise dns.exception.SyntaxError("caught exception %s: %s" % - (str(ty), str(va))) - - rd.choose_relativity(self.zone.origin, self.relativize) - covers = rd.covers() - rds = n.find_rdataset(rdclass, rdtype, covers, True) - rds.add(rd, ttl) - - def read(self): - """Read a DNS master file and build a zone object. - - @raises dns.zone.NoSOA: No SOA RR was found at the zone origin - @raises dns.zone.NoNS: No NS RRset was found at the zone origin - """ - - try: - while 1: - token = self.tok.get(True, True) - if token.is_eof(): - if self.current_file is not None: - self.current_file.close() - if len(self.saved_state) > 0: - (self.tok, - self.current_origin, - self.last_name, - self.current_file, - self.last_ttl, - self.last_ttl_known, - self.default_ttl, - self.default_ttl_known) = self.saved_state.pop(-1) - continue - break - elif token.is_eol(): - continue - elif token.is_comment(): - self.tok.get_eol() - continue - elif token.value[0] == u'$': - c = token.value.upper() - if c == u'$TTL': - token = self.tok.get() - if not token.is_identifier(): - raise dns.exception.SyntaxError("bad $TTL") - self.default_ttl = dns.ttl.from_text(token.value) - self.default_ttl_known = True - self.tok.get_eol() - elif c == u'$ORIGIN': - self.current_origin = self.tok.get_name() - self.tok.get_eol() - if self.zone.origin is None: - self.zone.origin = self.current_origin - elif c == u'$INCLUDE' and self.allow_include: - token = self.tok.get() - filename = token.value - token = self.tok.get() - if token.is_identifier(): - new_origin =\ - dns.name.from_text(token.value, - self.current_origin) - self.tok.get_eol() - elif not token.is_eol_or_eof(): - raise dns.exception.SyntaxError( - "bad origin in $INCLUDE") - else: - new_origin = self.current_origin - self.saved_state.append((self.tok, - self.current_origin, - self.last_name, - self.current_file, - self.last_ttl, - self.last_ttl_known, - self.default_ttl, - self.default_ttl_known)) - self.current_file = 
open(filename, 'r') - self.tok = dns.tokenizer.Tokenizer(self.current_file, - filename) - self.current_origin = new_origin - elif c == u'$GENERATE': - self._generate_line() - else: - raise dns.exception.SyntaxError( - "Unknown master file directive '" + c + "'") - continue - self.tok.unget(token) - self._rr_line() - except dns.exception.SyntaxError as detail: - (filename, line_number) = self.tok.where() - if detail is None: - detail = "syntax error" - raise dns.exception.SyntaxError( - "%s:%d: %s" % (filename, line_number, detail)) - - # Now that we're done reading, do some basic checking of the zone. - if self.check_origin: - self.zone.check_origin() - - -def from_text(text, origin=None, rdclass=dns.rdataclass.IN, - relativize=True, zone_factory=Zone, filename=None, - allow_include=False, check_origin=True): - """Build a zone object from a master file format string. - - @param text: the master file format input - @type text: string. - @param origin: The origin of the zone; if not specified, the first - $ORIGIN statement in the master file will determine the origin of the - zone. - @type origin: dns.name.Name object or string - @param rdclass: The zone's rdata class; the default is class IN. - @type rdclass: int - @param relativize: should names be relativized? The default is True - @type relativize: bool - @param zone_factory: The zone factory to use - @type zone_factory: function returning a Zone - @param filename: The filename to emit when describing where an error - occurred; the default is '<string>'. - @type filename: string - @param allow_include: is $INCLUDE allowed? - @type allow_include: bool - @param check_origin: should sanity checks of the origin node be done? - The default is True. 
- @type check_origin: bool - @raises dns.zone.NoSOA: No SOA RR was found at the zone origin - @raises dns.zone.NoNS: No NS RRset was found at the zone origin - @rtype: dns.zone.Zone object - """ - - # 'text' can also be a file, but we don't publish that fact - # since it's an implementation detail. The official file - # interface is from_file(). - - if filename is None: - filename = '<string>' - tok = dns.tokenizer.Tokenizer(text, filename) - reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory, - allow_include=allow_include, - check_origin=check_origin) - reader.read() - return reader.zone - - -def from_file(f, origin=None, rdclass=dns.rdataclass.IN, - relativize=True, zone_factory=Zone, filename=None, - allow_include=True, check_origin=True): - """Read a master file and build a zone object. - - @param f: file or string. If I{f} is a string, it is treated - as the name of a file to open. - @param origin: The origin of the zone; if not specified, the first - $ORIGIN statement in the master file will determine the origin of the - zone. - @type origin: dns.name.Name object or string - @param rdclass: The zone's rdata class; the default is class IN. - @type rdclass: int - @param relativize: should names be relativized? The default is True - @type relativize: bool - @param zone_factory: The zone factory to use - @type zone_factory: function returning a Zone - @param filename: The filename to emit when describing where an error - occurred; the default is '<file>', or the value of I{f} if I{f} is a - string. - @type filename: string - @param allow_include: is $INCLUDE allowed? - @type allow_include: bool - @param check_origin: should sanity checks of the origin node be done? - The default is True. 
- @type check_origin: bool - @raises dns.zone.NoSOA: No SOA RR was found at the zone origin - @raises dns.zone.NoNS: No NS RRset was found at the zone origin - @rtype: dns.zone.Zone object - """ - - str_type = string_types - if PY3: - opts = 'r' - else: - opts = 'rU' - - if isinstance(f, str_type): - if filename is None: - filename = f - f = open(f, opts) - want_close = True - else: - if filename is None: - filename = '<file>' - want_close = False - - try: - z = from_text(f, origin, rdclass, relativize, zone_factory, - filename, allow_include, check_origin) - finally: - if want_close: - f.close() - return z - - -def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True): - """Convert the output of a zone transfer generator into a zone object. - - @param xfr: The xfr generator - @type xfr: generator of dns.message.Message objects - @param relativize: should names be relativized? The default is True. - It is essential that the relativize setting matches the one specified - to dns.query.xfr(). - @type relativize: bool - @param check_origin: should sanity checks of the origin node be done? - The default is True. 
- @type check_origin: bool - @raises dns.zone.NoSOA: No SOA RR was found at the zone origin - @raises dns.zone.NoNS: No NS RRset was found at the zone origin - @rtype: dns.zone.Zone object - """ - - z = None - for r in xfr: - if z is None: - if relativize: - origin = r.origin - else: - origin = r.answer[0].name - rdclass = r.answer[0].rdclass - z = zone_factory(origin, rdclass, relativize=relativize) - for rrset in r.answer: - znode = z.nodes.get(rrset.name) - if not znode: - znode = z.node_factory() - z.nodes[rrset.name] = znode - zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, - rrset.covers, True) - zrds.update_ttl(rrset.ttl) - for rd in rrset: - rd.choose_relativity(z.origin, relativize) - zrds.add(rd) - if check_origin: - z.check_origin() - return z diff --git a/lib/dns/zone.pyi b/lib/dns/zone.pyi deleted file mode 100644 index 911d7a01..00000000 --- a/lib/dns/zone.pyi +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Generator, Optional, Union, Tuple, Iterable, Callable, Any, Iterator, TextIO, BinaryIO, Dict -from . import rdata, zone, rdataclass, name, rdataclass, message, rdatatype, exception, node, rdataset, rrset, rdatatype - -class BadZone(exception.DNSException): ... -class NoSOA(BadZone): ... -class NoNS(BadZone): ... -class UnknownOrigin(BadZone): ... - -class Zone: - def __getitem__(self, key : str) -> node.Node: - ... - def __init__(self, origin : Union[str,name.Name], rdclass : int = rdataclass.IN, relativize : bool = True) -> None: - self.nodes : Dict[str,node.Node] - self.origin = origin - def values(self): - return self.nodes.values() - def iterate_rdatas(self, rdtype : Union[int,str] = rdatatype.ANY, covers : Union[int,str] = None) -> Iterable[Tuple[name.Name, int, rdata.Rdata]]: - ... - def __iter__(self) -> Iterator[str]: - ... - def get_node(self, name : Union[name.Name,str], create=False) -> Optional[node.Node]: - ... 
- def find_rrset(self, name : Union[str,name.Name], rdtype : Union[int,str], covers=rdatatype.NONE) -> rrset.RRset: - ... - def find_rdataset(self, name : Union[str,name.Name], rdtype : Union[str,int], covers=rdatatype.NONE, - create=False) -> rdataset.Rdataset: - ... - def get_rdataset(self, name : Union[str,name.Name], rdtype : Union[str,int], covers=rdatatype.NONE, create=False) -> Optional[rdataset.Rdataset]: - ... - def get_rrset(self, name : Union[str,name.Name], rdtype : Union[str,int], covers=rdatatype.NONE) -> Optional[rrset.RRset]: - ... - def replace_rdataset(self, name : Union[str,name.Name], replacement : rdataset.Rdataset) -> None: - ... - def delete_rdataset(self, name : Union[str,name.Name], rdtype : Union[str,int], covers=rdatatype.NONE) -> None: - ... - def iterate_rdatasets(self, rdtype : Union[str,int] =rdatatype.ANY, - covers : Union[str,int] =rdatatype.NONE): - ... - def to_file(self, f : Union[TextIO, BinaryIO, str], sorted=True, relativize=True, nl : Optional[bytes] = None): - ... - def to_text(self, sorted=True, relativize=True, nl : Optional[bytes] = None) -> bytes: - ... - -def from_xfr(xfr : Generator[Any,Any,message.Message], zone_factory : Callable[..., zone.Zone] = zone.Zone, relativize=True, check_origin=True): - ... - -def from_text(text : str, origin : Optional[Union[str,name.Name]] = None, rdclass : int = rdataclass.IN, - relativize=True, zone_factory : Callable[...,zone.Zone] = zone.Zone, filename : Optional[str] = None, - allow_include=False, check_origin=True) -> zone.Zone: - ... - -def from_file(f, origin : Optional[Union[str,name.Name]] = None, rdclass=rdataclass.IN, - relativize=True, zone_factory : Callable[..., zone.Zone] = Zone, filename : Optional[str] = None, - allow_include=True, check_origin=True) -> zone.Zone: - ... 
diff --git a/lib/guessit/__init__.py b/lib/guessit/__init__.py new file mode 100644 index 00000000..03f8d208 --- /dev/null +++ b/lib/guessit/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Extracts as much information as possible from a video file. +""" +from . import monkeypatch as _monkeypatch + +from .api import guessit, GuessItApi +from .options import ConfigurationException +from .rules.common.quantity import Size + +from .__version__ import __version__ + +_monkeypatch.monkeypatch_rebulk() diff --git a/lib/guessit/__main__.py b/lib/guessit/__main__.py new file mode 100644 index 00000000..fad196d6 --- /dev/null +++ b/lib/guessit/__main__.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Entry point module +""" +# pragma: no cover +from __future__ import print_function + +import json +import logging +import os +import sys + +import six +from rebulk.__version__ import __version__ as __rebulk_version__ + +from guessit import api +from guessit.__version__ import __version__ +from guessit.jsonutils import GuessitEncoder +from guessit.options import argument_parser, parse_options, load_config, merge_options + + +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error + + +def guess_filename(filename, options): + """ + Guess a single filename using given options + :param filename: filename to parse + :type filename: str + :param options: + :type options: dict + :return: + :rtype: + """ + if not options.get('yaml') and not options.get('json') and not options.get('show_property'): + print('For:', filename) + + guess = api.guessit(filename, options) + + if options.get('show_property'): + print(guess.get(options.get('show_property'), '')) + return + + if options.get('json'): + print(json.dumps(guess, cls=GuessitEncoder, ensure_ascii=False)) + elif options.get('yaml'): + import yaml + from guessit import yamlutils + + 
ystr = yaml.dump({filename: OrderedDict(guess)}, Dumper=yamlutils.CustomDumper, default_flow_style=False, + allow_unicode=True) + i = 0 + for yline in ystr.splitlines(): + if i == 0: + print("? " + yline[:-1]) + elif i == 1: + print(":" + yline[1:]) + else: + print(yline) + i += 1 + else: + print('GuessIt found:', json.dumps(guess, cls=GuessitEncoder, indent=4, ensure_ascii=False)) + + +def display_properties(options): + """ + Display properties + """ + properties = api.properties(options) + + if options.get('json'): + if options.get('values'): + print(json.dumps(properties, cls=GuessitEncoder, ensure_ascii=False)) + else: + print(json.dumps(list(properties.keys()), cls=GuessitEncoder, ensure_ascii=False)) + elif options.get('yaml'): + import yaml + from guessit import yamlutils + if options.get('values'): + print(yaml.dump(properties, Dumper=yamlutils.CustomDumper, default_flow_style=False, allow_unicode=True)) + else: + print(yaml.dump(list(properties.keys()), Dumper=yamlutils.CustomDumper, default_flow_style=False, + allow_unicode=True)) + else: + print('GuessIt properties:') + + properties_list = list(sorted(properties.keys())) + for property_name in properties_list: + property_values = properties.get(property_name) + print(2 * ' ' + '[+] %s' % (property_name,)) + if property_values and options.get('values'): + for property_value in property_values: + print(4 * ' ' + '[!] 
def fix_argv_encoding():
    """
    Fix encoding of sys.argv on Windows under Python 2.

    On Python 2 + Windows, ``sys.argv`` holds byte strings encoded with the
    console code page; decode them to unicode using the preferred locale
    encoding so downstream parsing sees text. No-op everywhere else.
    """
    # Version check done with sys.version_info instead of six: this removes a
    # third-party dependency for a trivial test (six.PY2 is exactly this check).
    if sys.version_info[0] == 2 and os.name == 'nt':  # pragma: no cover
        # see http://bugs.python.org/issue2128
        import locale

        # Hoist the encoding lookup out of the loop: it is loop-invariant,
        # the original called locale.getpreferredencoding() once per argument.
        encoding = locale.getpreferredencoding()
        for i, arg in enumerate(sys.argv):
            sys.argv[i] = arg.decode(encoding)
\'--yaml\' option will be ignored ...', file=sys.stderr) + + if options.get('properties') or options.get('values'): + display_properties(options) + help_required = False + + filenames = [] + if options.get('filename'): + for filename in options.get('filename'): + filenames.append(filename) + if options.get('input_file'): + if six.PY2: + input_file = open(options.get('input_file'), 'r') + else: + input_file = open(options.get('input_file'), 'r', encoding='utf-8') + try: + filenames.extend([line.strip() for line in input_file.readlines()]) + finally: + input_file.close() + + filenames = list(filter(lambda f: f, filenames)) + + if filenames: + for filename in filenames: + help_required = False + guess_filename(filename, options) + + if help_required: # pragma: no cover + argument_parser.print_help() + + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/lib/guessit/__version__.py b/lib/guessit/__version__.py new file mode 100644 index 00000000..0cd3e067 --- /dev/null +++ b/lib/guessit/__version__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Version module +""" +# pragma: no cover +__version__ = '3.1.2.dev0' diff --git a/lib/guessit/api.py b/lib/guessit/api.py new file mode 100644 index 00000000..8e306340 --- /dev/null +++ b/lib/guessit/api.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +API functions that can be used by external software +""" + +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error + +import os +import traceback + +import six +from rebulk.introspector import introspect + +from .__version__ import __version__ +from .options import parse_options, load_config, merge_options +from .rules import rebulk_builder + + +class GuessitException(Exception): + """ + Exception raised when guessit fails to perform a guess because of an internal error. 
+ """ + + def __init__(self, string, options): + super(GuessitException, self).__init__("An internal error has occured in guessit.\n" + "===================== Guessit Exception Report =====================\n" + "version=%s\n" + "string=%s\n" + "options=%s\n" + "--------------------------------------------------------------------\n" + "%s" + "--------------------------------------------------------------------\n" + "Please report at " + "https://github.com/guessit-io/guessit/issues.\n" + "====================================================================" % + (__version__, str(string), str(options), traceback.format_exc())) + + self.string = string + self.options = options + + +def configure(options=None, rules_builder=rebulk_builder, force=False): + """ + Load configuration files and initialize rebulk rules if required. + + :param options: + :type options: dict + :param rules_builder: + :type rules_builder: + :param force: + :type force: bool + :return: + """ + default_api.configure(options, rules_builder=rules_builder, force=force) + + +def guessit(string, options=None): + """ + Retrieves all matches from string as a dict + :param string: the filename or release name + :type string: str + :param options: + :type options: str|dict + :return: + :rtype: + """ + return default_api.guessit(string, options) + + +def properties(options=None): + """ + Retrieves all properties with possible values that can be guessed + :param options: + :type options: str|dict + :return: + :rtype: + """ + return default_api.properties(options) + + +def suggested_expected(titles, options=None): + """ + Return a list of suggested titles to be used as `expected_title` based on the list of titles + :param titles: the filename or release name + :type titles: list|set|dict + :param options: + :type options: str|dict + :return: + :rtype: list of str + """ + return default_api.suggested_expected(titles, options) + + +class GuessItApi(object): + """ + An api class that can be configured with 
custom Rebulk configuration. + """ + + def __init__(self): + """Default constructor.""" + self.rebulk = None + self.config = None + self.load_config_options = None + self.advanced_config = None + + @classmethod + def _fix_encoding(cls, value): + if isinstance(value, list): + return [cls._fix_encoding(item) for item in value] + if isinstance(value, dict): + return {cls._fix_encoding(k): cls._fix_encoding(v) for k, v in value.items()} + if six.PY2 and isinstance(value, six.text_type): + return value.encode('utf-8') + if six.PY3 and isinstance(value, six.binary_type): + return value.decode('ascii') + return value + + @classmethod + def _has_same_properties(cls, dic1, dic2, values): + for value in values: + if dic1.get(value) != dic2.get(value): + return False + return True + + def configure(self, options=None, rules_builder=rebulk_builder, force=False, sanitize_options=True): + """ + Load configuration files and initialize rebulk rules if required. + + :param options: + :type options: str|dict + :param rules_builder: + :type rules_builder: + :param force: + :type force: bool + :return: + :rtype: dict + """ + if sanitize_options: + options = parse_options(options, True) + options = self._fix_encoding(options) + + if self.config is None or self.load_config_options is None or force or \ + not self._has_same_properties(self.load_config_options, + options, + ['config', 'no_user_config', 'no_default_config']): + config = load_config(options) + config = self._fix_encoding(config) + self.load_config_options = options + else: + config = self.config + + advanced_config = merge_options(config.get('advanced_config'), options.get('advanced_config')) + + should_build_rebulk = force or not self.rebulk or not self.advanced_config or \ + self.advanced_config != advanced_config + + if should_build_rebulk: + self.advanced_config = advanced_config + self.rebulk = rules_builder(advanced_config) + + self.config = config + return self.config + + def guessit(self, string, options=None): # 
pylint: disable=too-many-branches + """ + Retrieves all matches from string as a dict + :param string: the filename or release name + :type string: str|Path + :param options: + :type options: str|dict + :return: + :rtype: + """ + try: + from pathlib import Path + if isinstance(string, Path): + try: + # Handle path-like object + string = os.fspath(string) + except AttributeError: + string = str(string) + except ImportError: + pass + + try: + options = parse_options(options, True) + options = self._fix_encoding(options) + config = self.configure(options, sanitize_options=False) + options = merge_options(config, options) + result_decode = False + result_encode = False + + if six.PY2: + if isinstance(string, six.text_type): + string = string.encode("utf-8") + result_decode = True + elif isinstance(string, six.binary_type): + string = six.binary_type(string) + if six.PY3: + if isinstance(string, six.binary_type): + string = string.decode('ascii') + result_encode = True + elif isinstance(string, six.text_type): + string = six.text_type(string) + + matches = self.rebulk.matches(string, options) + if result_decode: + for match in matches: + if isinstance(match.value, six.binary_type): + match.value = match.value.decode("utf-8") + if result_encode: + for match in matches: + if isinstance(match.value, six.text_type): + match.value = match.value.encode("ascii") + return matches.to_dict(options.get('advanced', False), options.get('single_value', False), + options.get('enforce_list', False)) + except: + raise GuessitException(string, options) + + def properties(self, options=None): + """ + Grab properties and values that can be generated. 
def cmp_to_key(mycmp):
    """functools.cmp_to_key backport"""

    class KeyClass(object):
        """Adapter wrapping a value so rich comparisons delegate to *mycmp*."""

        def __init__(self, obj, *args):  # pylint: disable=unused-argument
            self.obj = obj

        # Each rich-comparison method compares the sign of mycmp's result
        # against zero, mirroring the old-style cmp() contract.
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0

        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0

        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0

        def __ne__(self, other):
            return mycmp(self.obj, other.obj) != 0

        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0

        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0

    return KeyClass
a/lib/guessit/config/options.json b/lib/guessit/config/options.json new file mode 100644 index 00000000..da7c7030 --- /dev/null +++ b/lib/guessit/config/options.json @@ -0,0 +1,586 @@ +{ + "expected_title": [ + "OSS 117", + "This is Us" + ], + "allowed_countries": [ + "au", + "gb", + "us" + ], + "allowed_languages": [ + "ca", + "cs", + "de", + "en", + "es", + "fr", + "he", + "hi", + "hu", + "it", + "ja", + "ko", + "mul", + "nl", + "no", + "pl", + "pt", + "ro", + "ru", + "sv", + "te", + "uk", + "und" + ], + "advanced_config": { + "common_words": [ + "ca", + "cat", + "de", + "he", + "it", + "no", + "por", + "rum", + "se", + "st", + "sub" + ], + "groups": { + "starting": "([{", + "ending": ")]}" + }, + "audio_codec": { + "audio_channels": { + "1.0": [ + "1ch", + "mono" + ], + "2.0": [ + "2ch", + "stereo", + "re:(2[\\W_]0(?:ch)?)(?=[^\\d]|$)" + ], + "5.1": [ + "5ch", + "6ch", + "re:(5[\\W_][01](?:ch)?)(?=[^\\d]|$)", + "re:(6[\\W_]0(?:ch)?)(?=[^\\d]|$)" + ], + "7.1": [ + "7ch", + "8ch", + "re:(7[\\W_][01](?:ch)?)(?=[^\\d]|$)" + ] + } + }, + "container": { + "subtitles": [ + "srt", + "idx", + "sub", + "ssa", + "ass" + ], + "info": [ + "nfo" + ], + "videos": [ + "3g2", + "3gp", + "3gp2", + "asf", + "avi", + "divx", + "flv", + "iso", + "m4v", + "mk2", + "mk3d", + "mka", + "mkv", + "mov", + "mp4", + "mp4a", + "mpeg", + "mpg", + "ogg", + "ogm", + "ogv", + "qt", + "ra", + "ram", + "rm", + "ts", + "vob", + "wav", + "webm", + "wma", + "wmv" + ], + "torrent": [ + "torrent" + ], + "nzb": [ + "nzb" + ] + }, + "country": { + "synonyms": { + "ES": [ + "españa" + ], + "GB": [ + "UK" + ], + "BR": [ + "brazilian", + "bra" + ], + "CA": [ + "québec", + "quebec", + "qc" + ], + "MX": [ + "Latinoamérica", + "latin america" + ] + } + }, + "episodes": { + "season_max_range": 100, + "episode_max_range": 100, + "max_range_gap": 1, + "season_markers": [ + "s" + ], + "season_ep_markers": [ + "x" + ], + "disc_markers": [ + "d" + ], + "episode_markers": [ + "xe", + "ex", + "ep", + "e", + "x" + ], + 
"range_separators": [ + "-", + "~", + "to", + "a" + ], + "discrete_separators": [ + "+", + "&", + "and", + "et" + ], + "season_words": [ + "season", + "saison", + "seizoen", + "seasons", + "saisons", + "tem", + "temp", + "temporada", + "temporadas", + "stagione" + ], + "episode_words": [ + "episode", + "episodes", + "eps", + "ep", + "episodio", + "episodios", + "capitulo", + "capitulos" + ], + "of_words": [ + "of", + "sur" + ], + "all_words": [ + "All" + ] + }, + "language": { + "synonyms": { + "ell": [ + "gr", + "greek" + ], + "spa": [ + "esp", + "español", + "espanol" + ], + "fra": [ + "français", + "vf", + "vff", + "vfi", + "vfq" + ], + "swe": [ + "se" + ], + "por_BR": [ + "po", + "pb", + "pob", + "ptbr", + "br", + "brazilian" + ], + "deu_CH": [ + "swissgerman", + "swiss german" + ], + "nld_BE": [ + "flemish" + ], + "cat": [ + "català", + "castellano", + "espanol castellano", + "español castellano" + ], + "ces": [ + "cz" + ], + "ukr": [ + "ua" + ], + "zho": [ + "cn" + ], + "jpn": [ + "jp" + ], + "hrv": [ + "scr" + ], + "mul": [ + "multi", + "dl" + ] + }, + "subtitle_affixes": [ + "sub", + "subs", + "esub", + "esubs", + "subbed", + "custom subbed", + "custom subs", + "custom sub", + "customsubbed", + "customsubs", + "customsub", + "soft subtitles", + "soft subs" + ], + "subtitle_prefixes": [ + "st", + "vost", + "subforced", + "fansub", + "hardsub", + "legenda", + "legendas", + "legendado", + "subtitulado", + "soft", + "subtitles" + ], + "subtitle_suffixes": [ + "subforced", + "fansub", + "hardsub" + ], + "language_affixes": [ + "dublado", + "dubbed", + "dub" + ], + "language_prefixes": [ + "true" + ], + "language_suffixes": [ + "audio" + ], + "weak_affixes": [ + "v", + "audio", + "true" + ] + }, + "part": { + "prefixes": [ + "pt", + "part" + ] + }, + "release_group": { + "forbidden_names": [ + "bonus", + "by", + "for", + "par", + "pour", + "rip" + ], + "ignored_seps": "[]{}()" + }, + "screen_size": { + "frame_rates": [ + "23.976", + "24", + "25", + "29.970", + 
"30", + "48", + "50", + "60", + "120" + ], + "min_ar": 1.333, + "max_ar": 1.898, + "interlaced": [ + "360", + "480", + "576", + "900", + "1080" + ], + "progressive": [ + "360", + "480", + "540", + "576", + "900", + "1080", + "368", + "720", + "1440", + "2160", + "4320" + ] + }, + "website": { + "safe_tlds": [ + "com", + "net", + "org" + ], + "safe_subdomains": [ + "www" + ], + "safe_prefixes": [ + "co", + "com", + "net", + "org" + ], + "prefixes": [ + "from" + ] + }, + "streaming_service": { + "A&E": [ + "AE", + "A&E" + ], + "ABC": "AMBC", + "ABC Australia": "AUBC", + "Al Jazeera English": "AJAZ", + "AMC": "AMC", + "Amazon Prime": [ + "AMZN", + "Amazon", + "re:Amazon-?Prime" + ], + "Adult Swim": [ + "AS", + "re:Adult-?Swim" + ], + "America's Test Kitchen": "ATK", + "Animal Planet": "ANPL", + "AnimeLab": "ANLB", + "AOL": "AOL", + "ARD": "ARD", + "BBC iPlayer": [ + "iP", + "re:BBC-?iPlayer" + ], + "BravoTV": "BRAV", + "Canal+": "CNLP", + "Cartoon Network": "CN", + "CBC": "CBC", + "CBS": "CBS", + "CNBC": "CNBC", + "Comedy Central": [ + "CC", + "re:Comedy-?Central" + ], + "Channel 4": "4OD", + "CHRGD": "CHGD", + "Cinemax": "CMAX", + "Country Music Television": "CMT", + "Comedians in Cars Getting Coffee": "CCGC", + "Crunchy Roll": [ + "CR", + "re:Crunchy-?Roll" + ], + "Crackle": "CRKL", + "CSpan": "CSPN", + "CTV": "CTV", + "CuriosityStream": "CUR", + "CWSeed": "CWS", + "Daisuki": "DSKI", + "DC Universe": "DCU", + "Deadhouse Films": "DHF", + "DramaFever": [ + "DF", + "DramaFever" + ], + "Digiturk Diledigin Yerde": "DDY", + "Discovery": [ + "DISC", + "Discovery" + ], + "Disney": [ + "DSNY", + "Disney" + ], + "DIY Network": "DIY", + "Doc Club": "DOCC", + "DPlay": "DPLY", + "E!": "ETV", + "ePix": "EPIX", + "El Trece": "ETTV", + "ESPN": "ESPN", + "Esquire": "ESQ", + "Family": "FAM", + "Family Jr": "FJR", + "Food Network": "FOOD", + "Fox": "FOX", + "Freeform": "FREE", + "FYI Network": "FYI", + "Global": "GLBL", + "GloboSat Play": "GLOB", + "Hallmark": "HLMK", + "HBO Go": [ + 
"HBO", + "re:HBO-?Go" + ], + "HGTV": "HGTV", + "History": [ + "HIST", + "History" + ], + "Hulu": "HULU", + "Investigation Discovery": "ID", + "IFC": "IFC", + "iTunes": "iTunes", + "ITV": "ITV", + "Knowledge Network": "KNOW", + "Lifetime": "LIFE", + "Motor Trend OnDemand": "MTOD", + "MBC": [ + "MBC", + "MBCVOD" + ], + "MSNBC": "MNBC", + "MTV": "MTV", + "National Geographic": [ + "NATG", + "re:National-?Geographic" + ], + "NBA TV": [ + "NBA", + "re:NBA-?TV" + ], + "NBC": "NBC", + "Netflix": [ + "NF", + "Netflix" + ], + "NFL": "NFL", + "NFL Now": "NFLN", + "NHL GameCenter": "GC", + "Nickelodeon": [ + "NICK", + "Nickelodeon" + ], + "Norsk Rikskringkasting": "NRK", + "OnDemandKorea": [ + "ODK", + "OnDemandKorea" + ], + "PBS": "PBS", + "PBS Kids": "PBSK", + "Playstation Network": "PSN", + "Pluzz": "PLUZ", + "RTE One": "RTE", + "SBS (AU)": "SBS", + "SeeSo": [ + "SESO", + "SeeSo" + ], + "Shomi": "SHMI", + "Spike": "SPIK", + "Spike TV": [ + "SPKE", + "re:Spike-?TV" + ], + "Sportsnet": "SNET", + "Sprout": "SPRT", + "Stan": "STAN", + "Starz": "STZ", + "Sveriges Television": "SVT", + "SwearNet": "SWER", + "Syfy": "SYFY", + "TBS": "TBS", + "TFou": "TFOU", + "The CW": [ + "CW", + "re:The-?CW" + ], + "TLC": "TLC", + "TubiTV": "TUBI", + "TV3 Ireland": "TV3", + "TV4 Sweeden": "TV4", + "TVING": "TVING", + "TV Land": [ + "TVL", + "re:TV-?Land" + ], + "UFC": "UFC", + "UKTV": "UKTV", + "Univision": "UNIV", + "USA Network": "USAN", + "Velocity": "VLCT", + "VH1": "VH1", + "Viceland": "VICE", + "Viki": "VIKI", + "Vimeo": "VMEO", + "VRV": "VRV", + "W Network": "WNET", + "WatchMe": "WME", + "WWE Network": "WWEN", + "Xbox Video": "XBOX", + "Yahoo": "YHOO", + "YouTube Red": "RED", + "ZDF": "ZDF" + } + } +} diff --git a/lib/guessit/jsonutils.py b/lib/guessit/jsonutils.py new file mode 100644 index 00000000..0a0ac3a6 --- /dev/null +++ b/lib/guessit/jsonutils.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +JSON Utils +""" +import json + +from six import text_type +from 
rebulk.match import Match + +class GuessitEncoder(json.JSONEncoder): + """ + JSON Encoder for guessit response + """ + + def default(self, o): # pylint:disable=method-hidden + if isinstance(o, Match): + return o.advanced + if hasattr(o, 'name'): # Babelfish languages/countries long name + return text_type(o.name) + # pragma: no cover + return text_type(o) diff --git a/lib/guessit/monkeypatch.py b/lib/guessit/monkeypatch.py new file mode 100644 index 00000000..33e7c46e --- /dev/null +++ b/lib/guessit/monkeypatch.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Monkeypatch initialisation functions +""" + +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error + +from rebulk.match import Match + + +def monkeypatch_rebulk(): + """Monkeypatch rebulk classes""" + + @property + def match_advanced(self): + """ + Build advanced dict from match + :param self: + :return: + """ + + ret = OrderedDict() + ret['value'] = self.value + if self.raw: + ret['raw'] = self.raw + ret['start'] = self.start + ret['end'] = self.end + return ret + + Match.advanced = match_advanced diff --git a/lib/guessit/options.py b/lib/guessit/options.py new file mode 100644 index 00000000..8fa6825c --- /dev/null +++ b/lib/guessit/options.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Options +""" +import copy +import json +import os +import pkgutil +import shlex + +from argparse import ArgumentParser + +import six + + +def build_argument_parser(): + """ + Builds the argument parser + :return: the argument parser + :rtype: ArgumentParser + """ + opts = ArgumentParser() + opts.add_argument(dest='filename', help='Filename or release name to guess', nargs='*') + + naming_opts = opts.add_argument_group("Naming") + naming_opts.add_argument('-t', '--type', dest='type', default=None, + help='The suggested file type: movie, episode. 
If undefined, type will be guessed.') + naming_opts.add_argument('-n', '--name-only', dest='name_only', action='store_true', default=None, + help='Parse files as name only, considering "/" and "\\" like other separators.') + naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None, + help='If short date is found, consider the first digits as the year.') + naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None, + help='If short date is found, consider the second digits as the day.') + naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', default=None, + help='Allowed language (can be used multiple times)') + naming_opts.add_argument('-C', '--allowed-countries', action='append', dest='allowed_countries', default=None, + help='Allowed country (can be used multiple times)') + naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number', + default=None, + help='Guess "serie.213.avi" as the episode 213. Without this option, ' + 'it will be guessed as season 2, episode 13') + naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', default=None, + help='Expected title to parse (can be used multiple times)') + naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', default=None, + help='Expected release group (can be used multiple times)') + naming_opts.add_argument('--includes', action='append', dest='includes', default=None, + help='List of properties to be detected') + naming_opts.add_argument('--excludes', action='append', dest='excludes', default=None, + help='List of properties to be ignored') + + input_opts = opts.add_argument_group("Input") + input_opts.add_argument('-f', '--input-file', dest='input_file', default=None, + help='Read filenames from an input text file. 
File should use UTF-8 charset.') + + output_opts = opts.add_argument_group("Output") + output_opts.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=None, + help='Display debug output') + output_opts.add_argument('-P', '--show-property', dest='show_property', default=None, + help='Display the value of a single property (title, series, video_codec, year, ...)') + output_opts.add_argument('-a', '--advanced', dest='advanced', action='store_true', default=None, + help='Display advanced information for filename guesses, as json output') + output_opts.add_argument('-s', '--single-value', dest='single_value', action='store_true', default=None, + help='Keep only first value found for each property') + output_opts.add_argument('-l', '--enforce-list', dest='enforce_list', action='store_true', default=None, + help='Wrap each found value in a list even when property has a single value') + output_opts.add_argument('-j', '--json', dest='json', action='store_true', default=None, + help='Display information for filename guesses as json output') + output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=None, + help='Display information for filename guesses as yaml output') + + conf_opts = opts.add_argument_group("Configuration") + conf_opts.add_argument('-c', '--config', dest='config', action='append', default=None, + help='Filepath to configuration file. Configuration file contains the same ' + 'options as those from command line options, but option names have "-" characters ' + 'replaced with "_". This configuration will be merged with default and user ' + 'configuration files.') + conf_opts.add_argument('--no-user-config', dest='no_user_config', action='store_true', + default=None, + help='Disable user configuration. 
If not defined, guessit tries to read configuration files ' + 'at ~/.guessit/options.(json|yml|yaml) and ~/.config/guessit/options.(json|yml|yaml)') + conf_opts.add_argument('--no-default-config', dest='no_default_config', action='store_true', + default=None, + help='Disable default configuration. This should be done only if you are providing a full ' + 'configuration through user configuration or --config option. If no "advanced_config" ' + 'is provided by another configuration file, it will still be loaded from default ' + 'configuration.') + + information_opts = opts.add_argument_group("Information") + information_opts.add_argument('-p', '--properties', dest='properties', action='store_true', default=None, + help='Display properties that can be guessed.') + information_opts.add_argument('-V', '--values', dest='values', action='store_true', default=None, + help='Display property values that can be guessed.') + information_opts.add_argument('--version', dest='version', action='store_true', default=None, + help='Display the guessit version.') + + return opts + + +def parse_options(options=None, api=False): + """ + Parse given option string + + :param options: + :type options: + :param api + :type api: boolean + :return: + :rtype: + """ + if isinstance(options, six.string_types): + args = shlex.split(options) + options = vars(argument_parser.parse_args(args)) + elif options is None: + if api: + options = {} + else: + options = vars(argument_parser.parse_args()) + elif not isinstance(options, dict): + options = vars(argument_parser.parse_args(options)) + return options + + +argument_parser = build_argument_parser() + + +class ConfigurationException(Exception): + """ + Exception related to configuration file. + """ + pass # pylint:disable=unnecessary-pass + + +def load_config(options): + """ + Load options from configuration files, if defined and present. 
+ :param options: + :type options: + :return: + :rtype: + """ + configurations = [] + + if not options.get('no_default_config'): + default_options_data = pkgutil.get_data('guessit', 'config/options.json').decode('utf-8') + default_options = json.loads(default_options_data) + configurations.append(default_options) + + config_files = [] + + if not options.get('no_user_config'): + home_directory = os.path.expanduser("~") + cwd = os.getcwd() + yaml_supported = False + try: + import yaml # pylint:disable=unused-variable,unused-import + yaml_supported = True + except ImportError: + pass + + config_file_locations = get_options_file_locations(home_directory, cwd, yaml_supported) + config_files = [f for f in config_file_locations if os.path.exists(f)] + + custom_config_files = options.get('config') + if custom_config_files: + config_files = config_files + custom_config_files + + for config_file in config_files: + config_file_options = load_config_file(config_file) + if config_file_options: + configurations.append(config_file_options) + + config = {} + if configurations: + config = merge_options(*configurations) + + if 'advanced_config' not in config: + # Guessit doesn't work without advanced_config, so we use default if no configuration files provides it. + default_options_data = pkgutil.get_data('guessit', 'config/options.json').decode('utf-8') + default_options = json.loads(default_options_data) + config['advanced_config'] = default_options['advanced_config'] + + return config + + +def merge_options(*options): + """ + Merge options into a single options dict. 
def merge_option_value(option, value, merged):
    """
    Merge a single option/value pair into the *merged* dict, in place.

    :param option: option name being merged
    :param value: incoming value for this option
    :param merged: accumulator dict mutated by this call
    :return: None
    """
    # Guard clauses: None values never overwrite, and 'pristine' is a
    # control key handled by merge_options, not a real option.
    if value is None or option == 'pristine':
        return

    current = merged.get(option)
    if isinstance(current, list):
        # Append only values not already present, preserving order.
        for item in value:
            if item not in current:
                current.append(item)
    elif isinstance(current, dict):
        # Nested option dicts are merged recursively.
        merged[option] = merge_options(current, value)
    elif isinstance(value, list):
        # Copy so the caller's list is not shared with the merged dict.
        merged[option] = list(value)
    else:
        merged[option] = value
def get_options_file_locations(homedir, cwd, yaml_supported=False):
    """
    Get all possible locations for options file.

    :param homedir: user home directory
    :type homedir: basestring
    :param cwd: current working directory
    :type cwd: basestring
    :param yaml_supported: whether yaml/yml extensions should also be probed
    :type yaml_supported: bool
    :return: candidate configuration file paths
    :rtype: list
    """
    extensions = ['json'] + (['yaml', 'yml'] if yaml_supported else [])
    candidates = [
        (os.path.join(homedir, '.guessit'), 'options'),
        (os.path.join(homedir, '.config', 'guessit'), 'options'),
        (cwd, 'guessit.options'),
    ]
    # Directories outer, extensions inner: keeps the original probe order.
    return [os.path.join(directory, basename + '.' + extension)
            for directory, basename in candidates
            for extension in extensions]
.properties.episodes import episodes +from .properties.container import container +from .properties.source import source +from .properties.video_codec import video_codec +from .properties.audio_codec import audio_codec +from .properties.screen_size import screen_size +from .properties.website import website +from .properties.date import date +from .properties.title import title +from .properties.episode_title import episode_title +from .properties.language import language +from .properties.country import country +from .properties.release_group import release_group +from .properties.streaming_service import streaming_service +from .properties.other import other +from .properties.size import size +from .properties.bit_rate import bit_rate +from .properties.edition import edition +from .properties.cds import cds +from .properties.bonus import bonus +from .properties.film import film +from .properties.part import part +from .properties.crc import crc +from .properties.mimetype import mimetype +from .properties.type import type_ + +from .processors import processors + + +def rebulk_builder(config): + """ + Default builder for main Rebulk object used by api. 
+ :return: Main Rebulk object + :rtype: Rebulk + """ + def _config(name): + return config.get(name, {}) + + rebulk = Rebulk() + + common_words = frozenset(_config('common_words')) + + rebulk.rebulk(path(_config('path'))) + rebulk.rebulk(groups(_config('groups'))) + + rebulk.rebulk(episodes(_config('episodes'))) + rebulk.rebulk(container(_config('container'))) + rebulk.rebulk(source(_config('source'))) + rebulk.rebulk(video_codec(_config('video_codec'))) + rebulk.rebulk(audio_codec(_config('audio_codec'))) + rebulk.rebulk(screen_size(_config('screen_size'))) + rebulk.rebulk(website(_config('website'))) + rebulk.rebulk(date(_config('date'))) + rebulk.rebulk(title(_config('title'))) + rebulk.rebulk(episode_title(_config('episode_title'))) + rebulk.rebulk(language(_config('language'), common_words)) + rebulk.rebulk(country(_config('country'), common_words)) + rebulk.rebulk(release_group(_config('release_group'))) + rebulk.rebulk(streaming_service(_config('streaming_service'))) + rebulk.rebulk(other(_config('other'))) + rebulk.rebulk(size(_config('size'))) + rebulk.rebulk(bit_rate(_config('bit_rate'))) + rebulk.rebulk(edition(_config('edition'))) + rebulk.rebulk(cds(_config('cds'))) + rebulk.rebulk(bonus(_config('bonus'))) + rebulk.rebulk(film(_config('film'))) + rebulk.rebulk(part(_config('part'))) + rebulk.rebulk(crc(_config('crc'))) + + rebulk.rebulk(processors(_config('processors'))) + + rebulk.rebulk(mimetype(_config('mimetype'))) + rebulk.rebulk(type_(_config('type'))) + + def customize_properties(properties): + """ + Customize default rebulk properties + """ + count = properties['count'] + del properties['count'] + + properties['season_count'] = count + properties['episode_count'] = count + + return properties + + rebulk.customize_properties = customize_properties + + return rebulk diff --git a/lib/guessit/rules/common/__init__.py b/lib/guessit/rules/common/__init__.py new file mode 100644 index 00000000..444dc72a --- /dev/null +++ 
b/lib/guessit/rules/common/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Common module +""" +import re + +seps = r' [](){}+*|=-_~#/\\.,;:' # list of tags/words separators +seps_no_groups = seps.replace('[](){}', '') +seps_no_fs = seps.replace('/', '').replace('\\', '') + +title_seps = r'-+/\|' # separators for title + +dash = (r'-', r'['+re.escape(seps_no_fs)+']') # abbreviation used by many rebulk objects. +alt_dash = (r'@', r'['+re.escape(seps_no_fs)+']') # abbreviation used by many rebulk objects. diff --git a/lib/guessit/rules/common/comparators.py b/lib/guessit/rules/common/comparators.py new file mode 100644 index 00000000..f46f0c11 --- /dev/null +++ b/lib/guessit/rules/common/comparators.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Comparators +""" +try: + from functools import cmp_to_key +except ImportError: + from ...backports import cmp_to_key + + +def marker_comparator_predicate(match): + """ + Match predicate used in comparator + """ + return ( + not match.private + and match.name not in ('proper_count', 'title') + and not (match.name == 'container' and 'extension' in match.tags) + and not (match.name == 'other' and match.value == 'Rip') + ) + + +def marker_weight(matches, marker, predicate): + """ + Compute the comparator weight of a marker + :param matches: + :param marker: + :param predicate: + :return: + """ + return len(set(match.name for match in matches.range(*marker.span, predicate=predicate))) + + +def marker_comparator(matches, markers, predicate): + """ + Builds a comparator that returns markers sorted from the most valuable to the less. + + Take the parts where matches count is higher, then when length is higher, then when position is at left. + + :param matches: + :type matches: + :param markers: + :param predicate: + :return: + :rtype: + """ + + def comparator(marker1, marker2): + """ + The actual comparator function. 
+ """ + matches_count = marker_weight(matches, marker2, predicate) - marker_weight(matches, marker1, predicate) + if matches_count: + return matches_count + + # give preference to rightmost path + return markers.index(marker2) - markers.index(marker1) + + return comparator + + +def marker_sorted(markers, matches, predicate=marker_comparator_predicate): + """ + Sort markers from matches, from the most valuable to the less. + + :param markers: + :type markers: + :param matches: + :type matches: + :param predicate: + :return: + :rtype: + """ + return sorted(markers, key=cmp_to_key(marker_comparator(matches, markers, predicate=predicate))) diff --git a/lib/guessit/rules/common/date.py b/lib/guessit/rules/common/date.py new file mode 100644 index 00000000..e513af9f --- /dev/null +++ b/lib/guessit/rules/common/date.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Date +""" +from dateutil import parser + +from rebulk.remodule import re + +_dsep = r'[-/ \.]' +_dsep_bis = r'[-/ \.x]' + +date_regexps = [ + re.compile(r'%s((\d{8}))%s' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'%s((\d{6}))%s' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{2})%s(\d{1,2})%s(\d{1,2}))(?:$|[^\d])' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{1,2})%s(\d{1,2})%s(\d{2}))(?:$|[^\d])' % (_dsep, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{4})%s(\d{1,2})%s(\d{1,2}))(?:$|[^\d])' % (_dsep_bis, _dsep), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{1,2})%s(\d{1,2})%s(\d{4}))(?:$|[^\d])' % (_dsep, _dsep_bis), re.IGNORECASE), + re.compile(r'(?:^|[^\d])((\d{1,2}(?:st|nd|rd|th)?%s(?:[a-z]{3,10})%s\d{4}))(?:$|[^\d])' % (_dsep, _dsep), + re.IGNORECASE)] + + +def valid_year(year): + """Check if number is a valid year""" + return 1920 <= year < 2030 + + +def _is_int(string): + """ + Check if the input string is an integer + + :param string: + :type string: + :return: + :rtype: + """ + try: + int(string) + return True + except 
ValueError: + return False + + +def _guess_day_first_parameter(groups): # pylint:disable=inconsistent-return-statements + """ + If day_first is not defined, use some heuristic to fix it. + It helps to solve issues with python dateutils 2.5.3 parser changes. + + :param groups: match groups found for the date + :type groups: list of match objects + :return: day_first option guessed value + :rtype: bool + """ + + # If match starts with a long year, then day_first is force to false. + if _is_int(groups[0]) and valid_year(int(groups[0][:4])): + return False + # If match ends with a long year, the day_first is forced to true. + if _is_int(groups[-1]) and valid_year(int(groups[-1][-4:])): + return True + # If match starts with a short year, then day_first is force to false. + if _is_int(groups[0]) and int(groups[0][:2]) > 31: + return False + # If match ends with a short year, then day_first is force to true. + if _is_int(groups[-1]) and int(groups[-1][-2:]) > 31: + return True + + +def search_date(string, year_first=None, day_first=None): # pylint:disable=inconsistent-return-statements + """Looks for date patterns, and if found return the date and group span. + + Assumes there are sentinels at the beginning and end of the string that + always allow matching a non-digit delimiting the date. + + Year can be defined on two digit only. It will return the nearest possible + date from today. + + >>> search_date(' This happened on 2002-04-22. ') + (18, 28, datetime.date(2002, 4, 22)) + + >>> search_date(' And this on 17-06-1998. 
') + (13, 23, datetime.date(1998, 6, 17)) + + >>> search_date(' no date in here ') + """ + for date_re in date_regexps: + search_match = date_re.search(string) + if not search_match: + continue + + start, end = search_match.start(1), search_match.end(1) + groups = search_match.groups()[1:] + match = '-'.join(groups) + + if match is None: + continue + + if year_first and day_first is None: + day_first = False + + if day_first is None: + day_first = _guess_day_first_parameter(groups) + + # If day_first/year_first is undefined, parse is made using both possible values. + yearfirst_opts = [False, True] + if year_first is not None: + yearfirst_opts = [year_first] + + dayfirst_opts = [True, False] + if day_first is not None: + dayfirst_opts = [day_first] + + kwargs_list = ({'dayfirst': d, 'yearfirst': y} + for d in dayfirst_opts for y in yearfirst_opts) + for kwargs in kwargs_list: + try: + date = parser.parse(match, **kwargs) + except (ValueError, TypeError): # pragma: no cover + # see https://bugs.launchpad.net/dateutil/+bug/1247643 + date = None + + # check date plausibility + if date and valid_year(date.year): # pylint:disable=no-member + return start, end, date.date() # pylint:disable=no-member diff --git a/lib/guessit/rules/common/expected.py b/lib/guessit/rules/common/expected.py new file mode 100644 index 00000000..eae562a2 --- /dev/null +++ b/lib/guessit/rules/common/expected.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Expected property factory +""" +import re + +from rebulk import Rebulk +from rebulk.utils import find_all + +from . import dash, seps + + +def build_expected_function(context_key): + """ + Creates a expected property function + :param context_key: + :type context_key: + :param cleanup: + :type cleanup: + :return: + :rtype: + """ + + def expected(input_string, context): + """ + Expected property functional pattern. 
+ :param input_string: + :type input_string: + :param context: + :type context: + :return: + :rtype: + """ + ret = [] + for search in context.get(context_key): + if search.startswith('re:'): + search = search[3:] + search = search.replace(' ', '-') + matches = Rebulk().regex(search, abbreviations=[dash], flags=re.IGNORECASE) \ + .matches(input_string, context) + for match in matches: + ret.append(match.span) + else: + value = search + for sep in seps: + input_string = input_string.replace(sep, ' ') + search = search.replace(sep, ' ') + for start in find_all(input_string, search, ignore_case=True): + ret.append({'start': start, 'end': start + len(search), 'value': value}) + return ret + + return expected diff --git a/lib/guessit/rules/common/formatters.py b/lib/guessit/rules/common/formatters.py new file mode 100644 index 00000000..2a64dee9 --- /dev/null +++ b/lib/guessit/rules/common/formatters.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Formatters +""" +from rebulk.formatters import formatters +from rebulk.remodule import re +from . import seps + +_excluded_clean_chars = ',:;-/\\' +clean_chars = "" +for sep in seps: + if sep not in _excluded_clean_chars: + clean_chars += sep + + +def _potential_before(i, input_string): + """ + Check if the character at position i can be a potential single char separator considering what's before it. + + :param i: + :type i: int + :param input_string: + :type input_string: str + :return: + :rtype: bool + """ + return i - 1 >= 0 and input_string[i] in seps and input_string[i - 2] in seps and input_string[i - 1] not in seps + + +def _potential_after(i, input_string): + """ + Check if the character at position i can be a potential single char separator considering what's after it. 
+ + :param i: + :type i: int + :param input_string: + :type input_string: str + :return: + :rtype: bool + """ + return i + 2 >= len(input_string) or \ + input_string[i + 2] == input_string[i] and input_string[i + 1] not in seps + + +def cleanup(input_string): + """ + Removes and strip separators from input_string (but keep ',;' characters) + + It also keep separators for single characters (Mavels Agents of S.H.I.E.L.D.) + + :param input_string: + :type input_string: str + :return: + :rtype: + """ + clean_string = input_string + for char in clean_chars: + clean_string = clean_string.replace(char, ' ') + + # Restore input separator if they separate single characters. + # Useful for Mavels Agents of S.H.I.E.L.D. + # https://github.com/guessit-io/guessit/issues/278 + + indices = [i for i, letter in enumerate(clean_string) if letter in seps] + + dots = set() + if indices: + clean_list = list(clean_string) + + potential_indices = [] + + for i in indices: + if _potential_before(i, input_string) and _potential_after(i, input_string): + potential_indices.append(i) + + replace_indices = [] + + for potential_index in potential_indices: + if potential_index - 2 in potential_indices or potential_index + 2 in potential_indices: + replace_indices.append(potential_index) + + if replace_indices: + for replace_index in replace_indices: + dots.add(input_string[replace_index]) + clean_list[replace_index] = input_string[replace_index] + clean_string = ''.join(clean_list) + + clean_string = strip(clean_string, ''.join([c for c in seps if c not in dots])) + + clean_string = re.sub(' +', ' ', clean_string) + return clean_string + + +def strip(input_string, chars=seps): + """ + Strip separators from input_string + :param input_string: + :param chars: + :type input_string: + :return: + :rtype: + """ + return input_string.strip(chars) + + +def raw_cleanup(raw): + """ + Cleanup a raw value to perform raw comparison + :param raw: + :type raw: + :return: + :rtype: + """ + return 
formatters(cleanup, strip)(raw.lower()) + + +def reorder_title(title, articles=('the',), separators=(',', ', ')): + """ + Reorder the title + :param title: + :type title: + :param articles: + :type articles: + :param separators: + :type separators: + :return: + :rtype: + """ + ltitle = title.lower() + for article in articles: + for separator in separators: + suffix = separator + article + if ltitle[-len(suffix):] == suffix: + return title[-len(suffix) + len(separator):] + ' ' + title[:-len(suffix)] + return title diff --git a/lib/guessit/rules/common/numeral.py b/lib/guessit/rules/common/numeral.py new file mode 100644 index 00000000..7c064fdb --- /dev/null +++ b/lib/guessit/rules/common/numeral.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +parse numeral from various formats +""" +from rebulk.remodule import re + +digital_numeral = r'\d{1,4}' + +roman_numeral = r'(?=[MCDLXVI]+)M{0,4}(?:CM|CD|D?C{0,3})(?:XC|XL|L?X{0,3})(?:IX|IV|V?I{0,3})' + +english_word_numeral_list = [ + 'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', + 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty' +] + +french_word_numeral_list = [ + 'zéro', 'un', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix', + 'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dix-sept', 'dix-huit', 'dix-neuf', 'vingt' +] + +french_alt_word_numeral_list = [ + 'zero', 'une', 'deux', 'trois', 'quatre', 'cinq', 'six', 'sept', 'huit', 'neuf', 'dix', + 'onze', 'douze', 'treize', 'quatorze', 'quinze', 'seize', 'dixsept', 'dixhuit', 'dixneuf', 'vingt' +] + + +def __build_word_numeral(*args): + """ + Build word numeral regexp from list. 
+ + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + re_ = None + for word_list in args: + for word in word_list: + if not re_: + re_ = r'(?:(?=\w+)' + else: + re_ += '|' + re_ += word + re_ += ')' + return re_ + + +word_numeral = __build_word_numeral(english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list) + +numeral = '(?:' + digital_numeral + '|' + roman_numeral + '|' + word_numeral + ')' + +__romanNumeralMap = ( + ('M', 1000), + ('CM', 900), + ('D', 500), + ('CD', 400), + ('C', 100), + ('XC', 90), + ('L', 50), + ('XL', 40), + ('X', 10), + ('IX', 9), + ('V', 5), + ('IV', 4), + ('I', 1) +) + +__romanNumeralPattern = re.compile('^' + roman_numeral + '$') + + +def __parse_roman(value): + """ + convert Roman numeral to integer + + :param value: Value to parse + :type value: string + :return: + :rtype: + """ + if not __romanNumeralPattern.search(value): + raise ValueError('Invalid Roman numeral: %s' % value) + + result = 0 + index = 0 + for num, integer in __romanNumeralMap: + while value[index:index + len(num)] == num: + result += integer + index += len(num) + return result + + +def __parse_word(value): + """ + Convert Word numeral to integer + + :param value: Value to parse + :type value: string + :return: + :rtype: + """ + for word_list in [english_word_numeral_list, french_word_numeral_list, french_alt_word_numeral_list]: + try: + return word_list.index(value.lower()) + except ValueError: + pass + raise ValueError # pragma: no cover + + +_clean_re = re.compile(r'[^\d]*(\d+)[^\d]*') + + +def parse_numeral(value, int_enabled=True, roman_enabled=True, word_enabled=True, clean=True): + """ + Parse a numeric value into integer. + + :param value: Value to parse. Can be an integer, roman numeral or word. 
+ :type value: string + :param int_enabled: + :type int_enabled: + :param roman_enabled: + :type roman_enabled: + :param word_enabled: + :type word_enabled: + :param clean: + :type clean: + :return: Numeric value, or None if value can't be parsed + :rtype: int + """ + # pylint: disable=too-many-branches + if int_enabled: + try: + if clean: + match = _clean_re.match(value) + if match: + clean_value = match.group(1) + return int(clean_value) + return int(value) + except ValueError: + pass + if roman_enabled: + try: + if clean: + for word in value.split(): + try: + return __parse_roman(word.upper()) + except ValueError: + pass + return __parse_roman(value) + except ValueError: + pass + if word_enabled: + try: + if clean: + for word in value.split(): + try: + return __parse_word(word) + except ValueError: # pragma: no cover + pass + return __parse_word(value) # pragma: no cover + except ValueError: # pragma: no cover + pass + raise ValueError('Invalid numeral: ' + value) # pragma: no cover diff --git a/lib/guessit/rules/common/pattern.py b/lib/guessit/rules/common/pattern.py new file mode 100644 index 00000000..5f560f2c --- /dev/null +++ b/lib/guessit/rules/common/pattern.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Pattern utility functions +""" + + +def is_disabled(context, name): + """Whether a specific pattern is disabled. + + The context object might define an inclusion list (includes) or an exclusion list (excludes) + A pattern is considered disabled if it's found in the exclusion list or + it's not found in the inclusion list and the inclusion list is not empty or not defined. 
+ + :param context: + :param name: + :return: + """ + if not context: + return False + + excludes = context.get('excludes') + if excludes and name in excludes: + return True + + includes = context.get('includes') + return includes and name not in includes diff --git a/lib/guessit/rules/common/quantity.py b/lib/guessit/rules/common/quantity.py new file mode 100644 index 00000000..bbd41fbb --- /dev/null +++ b/lib/guessit/rules/common/quantity.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Quantities: Size +""" +import re +from abc import abstractmethod + +import six + +from ..common import seps + + +class Quantity(object): + """ + Represent a quantity object with magnitude and units. + """ + + parser_re = re.compile(r'(?P<magnitude>\d+(?:[.]\d+)?)(?P<units>[^\d]+)?') + + def __init__(self, magnitude, units): + self.magnitude = magnitude + self.units = units + + @classmethod + @abstractmethod + def parse_units(cls, value): + """ + Parse a string to a proper unit notation. + """ + raise NotImplementedError + + @classmethod + def fromstring(cls, string): + """ + Parse the string into a quantity object. + :param string: + :return: + """ + values = cls.parser_re.match(string).groupdict() + try: + magnitude = int(values['magnitude']) + except ValueError: + magnitude = float(values['magnitude']) + units = cls.parse_units(values['units']) + + return cls(magnitude, units) + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other): + if isinstance(other, six.string_types): + return str(self) == other + if not isinstance(other, self.__class__): + return NotImplemented + return self.magnitude == other.magnitude and self.units == other.units + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return '<{0} [{1}]>'.format(self.__class__.__name__, self) + + def __str__(self): + return '{0}{1}'.format(self.magnitude, self.units) + + +class Size(Quantity): + """ + Represent size. 
+ + e.g.: 1.1GB, 300MB + """ + + @classmethod + def parse_units(cls, value): + return value.strip(seps).upper() + + +class BitRate(Quantity): + """ + Represent bit rate. + + e.g.: 320Kbps, 1.5Mbps + """ + + @classmethod + def parse_units(cls, value): + value = value.strip(seps).capitalize() + for token in ('bits', 'bit'): + value = value.replace(token, 'bps') + + return value + + +class FrameRate(Quantity): + """ + Represent frame rate. + + e.g.: 24fps, 60fps + """ + + @classmethod + def parse_units(cls, value): + return 'fps' diff --git a/lib/guessit/rules/common/validators.py b/lib/guessit/rules/common/validators.py new file mode 100644 index 00000000..0d0eb3eb --- /dev/null +++ b/lib/guessit/rules/common/validators.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Validators +""" +from functools import partial + +from rebulk.validators import chars_before, chars_after, chars_surround +from . import seps + +seps_before = partial(chars_before, seps) +seps_after = partial(chars_after, seps) +seps_surround = partial(chars_surround, seps) + + +def int_coercable(string): + """ + Check if string can be coerced to int + :param string: + :type string: + :return: + :rtype: + """ + try: + int(string) + return True + except ValueError: + return False + + +def and_(*validators): + """ + Compose validators functions + :param validators: + :type validators: + :return: + :rtype: + """ + def composed(string): + """ + Composed validators function + :param string: + :type string: + :return: + :rtype: + """ + for validator in validators: + if not validator(string): + return False + return True + return composed + + +def or_(*validators): + """ + Compose validators functions + :param validators: + :type validators: + :return: + :rtype: + """ + def composed(string): + """ + Composed validators function + :param string: + :type string: + :return: + :rtype: + """ + for validator in validators: + if validator(string): + return True + return False + return 
composed diff --git a/lib/guessit/rules/common/words.py b/lib/guessit/rules/common/words.py new file mode 100644 index 00000000..cccbc7d2 --- /dev/null +++ b/lib/guessit/rules/common/words.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Words utils +""" +from collections import namedtuple + +from . import seps + +_Word = namedtuple('_Word', ['span', 'value']) + + +def iter_words(string): + """ + Iterate on all words in a string + :param string: + :type string: + :return: + :rtype: iterable[str] + """ + i = 0 + last_sep_index = -1 + inside_word = False + for char in string: + if ord(char) < 128 and char in seps: # Make sure we don't exclude unicode characters. + if inside_word: + yield _Word(span=(last_sep_index+1, i), value=string[last_sep_index+1:i]) + inside_word = False + last_sep_index = i + else: + inside_word = True + i += 1 + if inside_word: + yield _Word(span=(last_sep_index+1, i), value=string[last_sep_index+1:i]) diff --git a/lib/guessit/rules/markers/__init__.py b/lib/guessit/rules/markers/__init__.py new file mode 100644 index 00000000..6a48a13b --- /dev/null +++ b/lib/guessit/rules/markers/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Markers +""" diff --git a/lib/guessit/rules/markers/groups.py b/lib/guessit/rules/markers/groups.py new file mode 100644 index 00000000..4716d15d --- /dev/null +++ b/lib/guessit/rules/markers/groups.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Groups markers (...), [...] and {...} +""" +from rebulk import Rebulk + + +def groups(config): + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + rebulk.defaults(name="group", marker=True) + + starting = config['starting'] + ending = config['ending'] + + def mark_groups(input_string): + """ + Functional pattern to mark groups (...), [...] and {...}. 
+ + :param input_string: + :return: + """ + openings = ([], [], []) + i = 0 + + ret = [] + for char in input_string: + start_type = starting.find(char) + if start_type > -1: + openings[start_type].append(i) + + i += 1 + + end_type = ending.find(char) + if end_type > -1: + try: + start_index = openings[end_type].pop() + ret.append((start_index, i)) + except IndexError: + pass + return ret + + rebulk.functional(mark_groups) + return rebulk diff --git a/lib/guessit/rules/markers/path.py b/lib/guessit/rules/markers/path.py new file mode 100644 index 00000000..6d993b75 --- /dev/null +++ b/lib/guessit/rules/markers/path.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Path markers +""" +from rebulk import Rebulk + +from rebulk.utils import find_all + + +def path(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + rebulk.defaults(name="path", marker=True) + + def mark_path(input_string, context): + """ + Functional pattern to mark path elements. + + :param input_string: + :param context: + :return: + """ + ret = [] + if context.get('name_only', False): + ret.append((0, len(input_string))) + else: + indices = list(find_all(input_string, '/')) + indices += list(find_all(input_string, '\\')) + indices += [-1, len(input_string)] + + indices.sort() + + for i in range(0, len(indices) - 1): + ret.append((indices[i] + 1, indices[i + 1])) + + return ret + + rebulk.functional(mark_path) + return rebulk diff --git a/lib/guessit/rules/match_processors.py b/lib/guessit/rules/match_processors.py new file mode 100644 index 00000000..0b49372f --- /dev/null +++ b/lib/guessit/rules/match_processors.py @@ -0,0 +1,20 @@ +""" +Match processors +""" +from guessit.rules.common import seps + + +def strip(match, chars=seps): + """ + Strip given characters from match. 
+ + :param chars: + :param match: + :return: + """ + while match.input_string[match.start] in chars: + match.start += 1 + while match.input_string[match.end - 1] in chars: + match.end -= 1 + if not match: + return False diff --git a/lib/guessit/rules/processors.py b/lib/guessit/rules/processors.py new file mode 100644 index 00000000..5b018140 --- /dev/null +++ b/lib/guessit/rules/processors.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Processors +""" +from collections import defaultdict +import copy + +import six + +from rebulk import Rebulk, Rule, CustomRule, POST_PROCESS, PRE_PROCESS, AppendMatch, RemoveMatch + +from .common import seps_no_groups +from .common.formatters import cleanup +from .common.comparators import marker_sorted +from .common.date import valid_year +from .common.words import iter_words + + +class EnlargeGroupMatches(CustomRule): + """ + Enlarge matches that are starting and/or ending group to include brackets in their span. + """ + priority = PRE_PROCESS + + def when(self, matches, context): + starting = [] + ending = [] + + for group in matches.markers.named('group'): + for match in matches.starting(group.start + 1): + starting.append(match) + + for match in matches.ending(group.end - 1): + ending.append(match) + + if starting or ending: + return starting, ending + return False + + def then(self, matches, when_response, context): + starting, ending = when_response + for match in starting: + matches.remove(match) + match.start -= 1 + match.raw_start += 1 + matches.append(match) + + for match in ending: + matches.remove(match) + match.end += 1 + match.raw_end -= 1 + matches.append(match) + + +class EquivalentHoles(Rule): + """ + Creates equivalent matches for holes that have same values than existing (case insensitive) + """ + priority = POST_PROCESS + consequence = AppendMatch + + def when(self, matches, context): + new_matches = [] + + for filepath in marker_sorted(matches.markers.named('path'), matches): + holes 
= matches.holes(start=filepath.start, end=filepath.end, formatter=cleanup) + for name in matches.names: + for hole in list(holes): + for current_match in matches.named(name): + if isinstance(current_match.value, six.string_types) and \ + hole.value.lower() == current_match.value.lower(): + if 'equivalent-ignore' in current_match.tags: + continue + new_value = _preferred_string(hole.value, current_match.value) + if hole.value != new_value: + hole.value = new_value + if current_match.value != new_value: + current_match.value = new_value + hole.name = name + hole.tags = ['equivalent'] + new_matches.append(hole) + if hole in holes: + holes.remove(hole) + + return new_matches + + +class RemoveAmbiguous(Rule): + """ + If multiple matches are found with same name and different values, keep the one in the most valuable filepart. + Also keep others match with same name and values than those kept ones. + """ + + priority = POST_PROCESS + consequence = RemoveMatch + + def __init__(self, sort_function=marker_sorted, predicate=None): + super(RemoveAmbiguous, self).__init__() + self.sort_function = sort_function + self.predicate = predicate + + def when(self, matches, context): + fileparts = self.sort_function(matches.markers.named('path'), matches) + + previous_fileparts_names = set() + values = defaultdict(list) + + to_remove = [] + for filepart in fileparts: + filepart_matches = matches.range(filepart.start, filepart.end, predicate=self.predicate) + + filepart_names = set() + for match in filepart_matches: + filepart_names.add(match.name) + if match.name in previous_fileparts_names: + if match.value not in values[match.name]: + to_remove.append(match) + else: + if match.value not in values[match.name]: + values[match.name].append(match.value) + + previous_fileparts_names.update(filepart_names) + + return to_remove + + +class RemoveLessSpecificSeasonEpisode(RemoveAmbiguous): + """ + If multiple season/episodes matches are found with different values, + keep the one tagged as 
'SxxExx' or in the rightmost filepart. + """ + def __init__(self, name): + super(RemoveLessSpecificSeasonEpisode, self).__init__( + sort_function=(lambda markers, matches: + marker_sorted(list(reversed(markers)), matches, + lambda match: match.name == name and 'SxxExx' in match.tags)), + predicate=lambda match: match.name == name) + + +def _preferred_string(value1, value2): # pylint:disable=too-many-return-statements + """ + Retrieves preferred title from both values. + :param value1: + :type value1: str + :param value2: + :type value2: str + :return: The preferred title + :rtype: str + """ + if value1 == value2: + return value1 + if value1.istitle() and not value2.istitle(): + return value1 + if not value1.isupper() and value2.isupper(): + return value1 + if not value1.isupper() and value1[0].isupper() and not value2[0].isupper(): + return value1 + if _count_title_words(value1) > _count_title_words(value2): + return value1 + return value2 + + +def _count_title_words(value): + """ + Count only many words are titles in value. + :param value: + :type value: + :return: + :rtype: + """ + ret = 0 + for word in iter_words(value): + if word.value.istitle(): + ret += 1 + return ret + + +class SeasonYear(Rule): + """ + If a season is a valid year and no year was found, create an match with year. + """ + priority = POST_PROCESS + consequence = AppendMatch + + def when(self, matches, context): + ret = [] + if not matches.named('year'): + for season in matches.named('season'): + if valid_year(season.value): + year = copy.copy(season) + year.name = 'year' + ret.append(year) + return ret + + +class YearSeason(Rule): + """ + If a year is found, no season found, and episode is found, create an match with season. 
+ """ + priority = POST_PROCESS + consequence = AppendMatch + + def when(self, matches, context): + ret = [] + if not matches.named('season') and matches.named('episode'): + for year in matches.named('year'): + season = copy.copy(year) + season.name = 'season' + ret.append(season) + return ret + + +class Processors(CustomRule): + """ + Empty rule for ordering post_processing properly. + """ + priority = POST_PROCESS + + def when(self, matches, context): + pass + + def then(self, matches, when_response, context): # pragma: no cover + pass + + +class StripSeparators(CustomRule): + """ + Strip separators from matches. Keep separators if they are from acronyms, like in ".S.H.I.E.L.D." + """ + priority = POST_PROCESS + + def when(self, matches, context): + return matches + + def then(self, matches, when_response, context): # pragma: no cover + for match in matches: + for _ in range(0, len(match.span)): + if match.raw[0] in seps_no_groups and (len(match.raw) < 3 or match.raw[2] not in seps_no_groups): + match.raw_start += 1 + + for _ in reversed(range(0, len(match.span))): + if match.raw[-1] in seps_no_groups and (len(match.raw) < 3 or match.raw[-3] not in seps_no_groups): + match.raw_end -= 1 + + +def processors(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + return Rebulk().rules(EnlargeGroupMatches, EquivalentHoles, + RemoveLessSpecificSeasonEpisode('season'), + RemoveLessSpecificSeasonEpisode('episode'), + RemoveAmbiguous, SeasonYear, YearSeason, Processors, StripSeparators) diff --git a/lib/guessit/rules/properties/__init__.py b/lib/guessit/rules/properties/__init__.py new file mode 100644 index 00000000..e0a24eaf --- /dev/null +++ b/lib/guessit/rules/properties/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Properties +""" diff --git a/lib/guessit/rules/properties/audio_codec.py b/lib/guessit/rules/properties/audio_codec.py new file mode 100644 index 00000000..815caff9 --- /dev/null +++ b/lib/guessit/rules/properties/audio_codec.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +audio_codec, audio_profile and audio_channels property +""" +from rebulk import Rebulk, Rule, RemoveMatch +from rebulk.remodule import re + +from ..common import dash +from ..common.pattern import is_disabled +from ..common.validators import seps_before, seps_after + +audio_properties = ['audio_codec', 'audio_profile', 'audio_channels'] + + +def audio_codec(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk()\ + .regex_defaults(flags=re.IGNORECASE, abbreviations=[dash])\ + .string_defaults(ignore_case=True) + + def audio_codec_priority(match1, match2): + """ + Gives priority to audio_codec + :param match1: + :type match1: + :param match2: + :type match2: + :return: + :rtype: + """ + if match1.name == 'audio_codec' and match2.name in ['audio_profile', 'audio_channels']: + return match2 + if match1.name in ['audio_profile', 'audio_channels'] and match2.name == 'audio_codec': + return match1 + return '__default__' + + rebulk.defaults(name='audio_codec', + conflict_solver=audio_codec_priority, + disabled=lambda context: is_disabled(context, 'audio_codec')) + + rebulk.regex("MP3", "LAME", r"LAME(?:\d)+-?(?:\d)+", value="MP3") + rebulk.string("MP2", value="MP2") + rebulk.regex('Dolby', 'DolbyDigital', 'Dolby-Digital', 'DD', 'AC3D?', value='Dolby Digital') + rebulk.regex('Dolby-?Atmos', 'Atmos', value='Dolby Atmos') + rebulk.string("AAC", value="AAC") + rebulk.string('EAC3', 'DDP', 'DD+', value='Dolby Digital Plus') + rebulk.string("Flac", value="FLAC") + rebulk.string("DTS", value="DTS") + rebulk.regex('DTS-?HD', 'DTS(?=-?MA)', value='DTS-HD', + conflict_solver=lambda match, other: other if other.name == 'audio_codec' else '__default__') + rebulk.regex('True-?HD', value='Dolby TrueHD') + rebulk.string('Opus', value='Opus') + rebulk.string('Vorbis', value='Vorbis') + rebulk.string('PCM', value='PCM') + rebulk.string('LPCM', value='LPCM') + + rebulk.defaults(clear=True, + name='audio_profile', + disabled=lambda context: is_disabled(context, 'audio_profile')) + rebulk.string('MA', value='Master Audio', tags=['audio_profile.rule', 'DTS-HD']) + rebulk.string('HR', 'HRA', value='High Resolution Audio', tags=['audio_profile.rule', 'DTS-HD']) + rebulk.string('ES', value='Extended Surround', tags=['audio_profile.rule', 'DTS']) + rebulk.string('HE', 
value='High Efficiency', tags=['audio_profile.rule', 'AAC']) + rebulk.string('LC', value='Low Complexity', tags=['audio_profile.rule', 'AAC']) + rebulk.string('HQ', value='High Quality', tags=['audio_profile.rule', 'Dolby Digital']) + rebulk.string('EX', value='EX', tags=['audio_profile.rule', 'Dolby Digital']) + + rebulk.defaults(clear=True, + name="audio_channels", + disabled=lambda context: is_disabled(context, 'audio_channels')) + rebulk.regex('7[01]', value='7.1', validator=seps_after, tags='weak-audio_channels') + rebulk.regex('5[01]', value='5.1', validator=seps_after, tags='weak-audio_channels') + rebulk.string('20', value='2.0', validator=seps_after, tags='weak-audio_channels') + + for value, items in config.get('audio_channels').items(): + for item in items: + if item.startswith('re:'): + rebulk.regex(item[3:], value=value, children=True) + else: + rebulk.string(item, value=value) + + rebulk.rules(DtsHDRule, DtsRule, AacRule, DolbyDigitalRule, AudioValidatorRule, HqConflictRule, + AudioChannelsValidatorRule) + + return rebulk + + +class AudioValidatorRule(Rule): + """ + Remove audio properties if not surrounded by separators and not next each others + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + + audio_list = matches.range(predicate=lambda match: match.name in audio_properties) + for audio in audio_list: + if not seps_before(audio): + valid_before = matches.range(audio.start - 1, audio.start, + lambda match: match.name in audio_properties) + if not valid_before: + ret.append(audio) + continue + if not seps_after(audio): + valid_after = matches.range(audio.end, audio.end + 1, + lambda match: match.name in audio_properties) + if not valid_after: + ret.append(audio) + continue + + return ret + + +class AudioProfileRule(Rule): + """ + Abstract rule to validate audio profiles + """ + priority = 64 + dependency = AudioValidatorRule + consequence = RemoveMatch + + def __init__(self, codec): + 
super(AudioProfileRule, self).__init__() + self.codec = codec + + def enabled(self, context): + return not is_disabled(context, 'audio_profile') + + def when(self, matches, context): + profile_list = matches.named('audio_profile', + lambda match: 'audio_profile.rule' in match.tags and + self.codec in match.tags) + ret = [] + for profile in profile_list: + codec = matches.at_span(profile.span, + lambda match: match.name == 'audio_codec' and + match.value == self.codec, 0) + if not codec: + codec = matches.previous(profile, + lambda match: match.name == 'audio_codec' and + match.value == self.codec) + if not codec: + codec = matches.next(profile, + lambda match: match.name == 'audio_codec' and + match.value == self.codec) + if not codec: + ret.append(profile) + if codec: + ret.extend(matches.conflicting(profile)) + return ret + + +class DtsHDRule(AudioProfileRule): + """ + Rule to validate DTS-HD profile + """ + + def __init__(self): + super(DtsHDRule, self).__init__('DTS-HD') + + +class DtsRule(AudioProfileRule): + """ + Rule to validate DTS profile + """ + + def __init__(self): + super(DtsRule, self).__init__('DTS') + + +class AacRule(AudioProfileRule): + """ + Rule to validate AAC profile + """ + + def __init__(self): + super(AacRule, self).__init__('AAC') + + +class DolbyDigitalRule(AudioProfileRule): + """ + Rule to validate Dolby Digital profile + """ + + def __init__(self): + super(DolbyDigitalRule, self).__init__('Dolby Digital') + + +class HqConflictRule(Rule): + """ + Solve conflict between HQ from other property and from audio_profile. 
+ """ + + dependency = [DtsHDRule, DtsRule, AacRule, DolbyDigitalRule] + consequence = RemoveMatch + + def enabled(self, context): + return not is_disabled(context, 'audio_profile') + + def when(self, matches, context): + hq_audio = matches.named('audio_profile', lambda m: m.value == 'High Quality') + hq_audio_spans = [match.span for match in hq_audio] + return matches.named('other', lambda m: m.span in hq_audio_spans) + + +class AudioChannelsValidatorRule(Rule): + """ + Remove audio_channel if no audio codec as previous match. + """ + priority = 128 + consequence = RemoveMatch + + def enabled(self, context): + return not is_disabled(context, 'audio_channels') + + def when(self, matches, context): + ret = [] + + for audio_channel in matches.tagged('weak-audio_channels'): + valid_before = matches.range(audio_channel.start - 1, audio_channel.start, + lambda match: match.name == 'audio_codec') + if not valid_before: + ret.append(audio_channel) + + return ret diff --git a/lib/guessit/rules/properties/bit_rate.py b/lib/guessit/rules/properties/bit_rate.py new file mode 100644 index 00000000..d279c9f1 --- /dev/null +++ b/lib/guessit/rules/properties/bit_rate.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +video_bit_rate and audio_bit_rate properties +""" +import re + +from rebulk import Rebulk +from rebulk.rules import Rule, RemoveMatch, RenameMatch + +from ..common import dash, seps +from ..common.pattern import is_disabled +from ..common.quantity import BitRate +from ..common.validators import seps_surround + + +def bit_rate(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: (is_disabled(context, 'audio_bit_rate') + and is_disabled(context, 'video_bit_rate'))) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) + rebulk.defaults(name='audio_bit_rate', validator=seps_surround) + rebulk.regex(r'\d+-?[kmg]b(ps|its?)', r'\d+\.\d+-?[kmg]b(ps|its?)', + conflict_solver=( + lambda match, other: match + if other.name == 'audio_channels' and 'weak-audio_channels' not in other.tags + else other + ), + formatter=BitRate.fromstring, tags=['release-group-prefix']) + + rebulk.rules(BitRateTypeRule) + + return rebulk + + +class BitRateTypeRule(Rule): + """ + Convert audio bit rate guess into video bit rate. + """ + consequence = [RenameMatch('video_bit_rate'), RemoveMatch] + + def when(self, matches, context): + to_rename = [] + to_remove = [] + + if is_disabled(context, 'audio_bit_rate'): + to_remove.extend(matches.named('audio_bit_rate')) + else: + video_bit_rate_disabled = is_disabled(context, 'video_bit_rate') + for match in matches.named('audio_bit_rate'): + previous = matches.previous(match, index=0, + predicate=lambda m: m.name in ('source', 'screen_size', 'video_codec')) + if previous and not matches.holes(previous.end, match.start, predicate=lambda m: m.value.strip(seps)): + after = matches.next(match, index=0, predicate=lambda m: m.name == 'audio_codec') + if after and not matches.holes(match.end, after.start, predicate=lambda m: m.value.strip(seps)): + bitrate = match.value + if bitrate.units == 'Kbps' or (bitrate.units == 'Mbps' and bitrate.magnitude < 10): + continue + + if video_bit_rate_disabled: + to_remove.append(match) + else: + to_rename.append(match) + + if to_rename or to_remove: + return to_rename, to_remove + return False diff --git a/lib/guessit/rules/properties/bonus.py b/lib/guessit/rules/properties/bonus.py new file mode 100644 index 
00000000..54087aa3 --- /dev/null +++ b/lib/guessit/rules/properties/bonus.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +bonus property +""" +from rebulk.remodule import re + +from rebulk import Rebulk, AppendMatch, Rule + +from .title import TitleFromPosition +from ..common.formatters import cleanup +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def bonus(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'bonus')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE) + + rebulk.regex(r'x(\d+)', name='bonus', private_parent=True, children=True, formatter=int, + validator={'__parent__': seps_surround}, + validate_all=True, + conflict_solver=lambda match, conflicting: match + if conflicting.name in ('video_codec', 'episode') and 'weak-episode' not in conflicting.tags + else '__default__') + + rebulk.rules(BonusTitleRule) + + return rebulk + + +class BonusTitleRule(Rule): + """ + Find bonus title after bonus. 
+ """ + dependency = TitleFromPosition + consequence = AppendMatch + + properties = {'bonus_title': [None]} + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + bonus_number = matches.named('bonus', lambda match: not match.private, index=0) + if bonus_number: + filepath = matches.markers.at_match(bonus_number, lambda marker: marker.name == 'path', 0) + hole = matches.holes(bonus_number.end, filepath.end + 1, formatter=cleanup, index=0) + if hole and hole.value: + hole.name = 'bonus_title' + return hole diff --git a/lib/guessit/rules/properties/cds.py b/lib/guessit/rules/properties/cds.py new file mode 100644 index 00000000..873df6fe --- /dev/null +++ b/lib/guessit/rules/properties/cds.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +cd and cd_count properties +""" +from rebulk.remodule import re + +from rebulk import Rebulk + +from ..common import dash +from ..common.pattern import is_disabled + + +def cds(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'cd')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) + + rebulk.regex(r'cd-?(?P<cd>\d+)(?:-?of-?(?P<cd_count>\d+))?', + validator={'cd': lambda match: 0 < match.value < 100, + 'cd_count': lambda match: 0 < match.value < 100}, + formatter={'cd': int, 'cd_count': int}, + children=True, + private_parent=True, + properties={'cd': [None], 'cd_count': [None]}) + rebulk.regex(r'(?P<cd_count>\d+)-?cds?', + validator={'cd': lambda match: 0 < match.value < 100, + 'cd_count': lambda match: 0 < match.value < 100}, + formatter={'cd_count': int}, + children=True, + private_parent=True, + properties={'cd': [None], 'cd_count': [None]}) + + return rebulk diff --git a/lib/guessit/rules/properties/container.py b/lib/guessit/rules/properties/container.py new file mode 100644 index 00000000..0f1860af --- /dev/null +++ b/lib/guessit/rules/properties/container.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +container property +""" +from rebulk.remodule import re + +from rebulk import Rebulk + +from ..common import seps +from ..common.pattern import is_disabled +from ..common.validators import seps_surround +from ...reutils import build_or_pattern + + +def container(config): + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'container')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True) + rebulk.defaults(name='container', + formatter=lambda value: value.strip(seps), + tags=['extension'], + conflict_solver=lambda match, other: other + if other.name in ('source', 'video_codec') or + other.name == 'container' and 'extension' not in other.tags + else '__default__') + + subtitles = config['subtitles'] + info = config['info'] + videos = config['videos'] + torrent = config['torrent'] + nzb = config['nzb'] + + rebulk.regex(r'\.'+build_or_pattern(subtitles)+'$', exts=subtitles, tags=['extension', 'subtitle']) + rebulk.regex(r'\.'+build_or_pattern(info)+'$', exts=info, tags=['extension', 'info']) + rebulk.regex(r'\.'+build_or_pattern(videos)+'$', exts=videos, tags=['extension', 'video']) + rebulk.regex(r'\.'+build_or_pattern(torrent)+'$', exts=torrent, tags=['extension', 'torrent']) + rebulk.regex(r'\.'+build_or_pattern(nzb)+'$', exts=nzb, tags=['extension', 'nzb']) + + rebulk.defaults(clear=True, + name='container', + validator=seps_surround, + formatter=lambda s: s.lower(), + conflict_solver=lambda match, other: match + if other.name in ('source', + 'video_codec') or other.name == 'container' and 'extension' in other.tags + else '__default__') + + rebulk.string(*[sub for sub in subtitles if sub not in ('sub', 'ass')], tags=['subtitle']) + rebulk.string(*videos, tags=['video']) + rebulk.string(*torrent, tags=['torrent']) + rebulk.string(*nzb, tags=['nzb']) + + return rebulk diff --git a/lib/guessit/rules/properties/country.py b/lib/guessit/rules/properties/country.py new file mode 100644 index 00000000..172c2990 --- /dev/null +++ b/lib/guessit/rules/properties/country.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +country property +""" +# pylint: 
disable=no-member +import babelfish + +from rebulk import Rebulk +from ..common.pattern import is_disabled +from ..common.words import iter_words + + +def country(config, common_words): + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :param common_words: common words + :type common_words: set + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'country')) + rebulk = rebulk.defaults(name='country') + + def find_countries(string, context=None): + """ + Find countries in given string. + """ + allowed_countries = context.get('allowed_countries') if context else None + return CountryFinder(allowed_countries, common_words).find(string) + + rebulk.functional(find_countries, + #  Prefer language and any other property over country if not US or GB. + conflict_solver=lambda match, other: match + if other.name != 'language' or match.value not in (babelfish.Country('US'), + babelfish.Country('GB')) + else other, + properties={'country': [None]}, + disabled=lambda context: not context.get('allowed_countries')) + + babelfish.country_converters['guessit'] = GuessitCountryConverter(config['synonyms']) + + return rebulk + + +class GuessitCountryConverter(babelfish.CountryReverseConverter): # pylint: disable=missing-docstring + def __init__(self, synonyms): + self.guessit_exceptions = {} + + for alpha2, synlist in synonyms.items(): + for syn in synlist: + self.guessit_exceptions[syn.lower()] = alpha2 + + @property + def codes(self): # pylint: disable=missing-docstring + return (babelfish.country_converters['name'].codes | + frozenset(babelfish.COUNTRIES.values()) | + frozenset(self.guessit_exceptions.keys())) + + def convert(self, alpha2): + if alpha2 == 'GB': + return 'UK' + return str(babelfish.Country(alpha2)) + + def reverse(self, name): # pylint:disable=arguments-differ + # exceptions come first, as they need to override a potential match + # with any of the other 
guessers + try: + return self.guessit_exceptions[name.lower()] + except KeyError: + pass + + try: + return babelfish.Country(name.upper()).alpha2 + except ValueError: + pass + + for conv in [babelfish.Country.fromname]: + try: + return conv(name).alpha2 + except babelfish.CountryReverseError: + pass + + raise babelfish.CountryReverseError(name) + + +class CountryFinder(object): + """Helper class to search and return country matches.""" + + def __init__(self, allowed_countries, common_words): + self.allowed_countries = {l.lower() for l in allowed_countries or []} + self.common_words = common_words + + def find(self, string): + """Return all matches for country.""" + for word_match in iter_words(string.strip().lower()): + word = word_match.value + if word.lower() in self.common_words: + continue + + try: + country_object = babelfish.Country.fromguessit(word) + if (country_object.name.lower() in self.allowed_countries or + country_object.alpha2.lower() in self.allowed_countries): + yield self._to_rebulk_match(word_match, country_object) + except babelfish.Error: + continue + + @classmethod + def _to_rebulk_match(cls, word, value): + return word.span[0], word.span[1], {'value': value} diff --git a/lib/guessit/rules/properties/crc.py b/lib/guessit/rules/properties/crc.py new file mode 100644 index 00000000..eedee93d --- /dev/null +++ b/lib/guessit/rules/properties/crc.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +crc and uuid properties +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def crc(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'crc32')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE) + rebulk.defaults(validator=seps_surround) + + rebulk.regex('(?:[a-fA-F]|[0-9]){8}', name='crc32', + conflict_solver=lambda match, other: other + if other.name in ['episode', 'season'] + else '__default__') + + rebulk.functional(guess_idnumber, name='uuid', + conflict_solver=lambda match, other: match + if other.name in ['episode', 'season'] + else '__default__') + return rebulk + + +_DIGIT = 0 +_LETTER = 1 +_OTHER = 2 + +_idnum = re.compile(r'(?P<uuid>[a-zA-Z0-9-]{20,})') # 1.0, (0, 0)) + + +def guess_idnumber(string): + """ + Guess id number function + :param string: + :type string: + :return: + :rtype: + """ + # pylint:disable=invalid-name + ret = [] + + matches = list(_idnum.finditer(string)) + for match in matches: + result = match.groupdict() + switch_count = 0 + switch_letter_count = 0 + letter_count = 0 + last_letter = None + + last = _LETTER + for c in result['uuid']: + if c in '0123456789': + ci = _DIGIT + elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': + ci = _LETTER + if c != last_letter: + switch_letter_count += 1 + last_letter = c + letter_count += 1 + else: + ci = _OTHER + + if ci != last: + switch_count += 1 + + last = ci + + # only return the result as probable if we alternate often between + # char type (more likely for hash values than for common words) + switch_ratio = float(switch_count) / len(result['uuid']) + letters_ratio = (float(switch_letter_count) / letter_count) if letter_count > 0 else 1 + + if switch_ratio > 0.4 and letters_ratio > 0.4: + ret.append(match.span()) + + return ret diff --git a/lib/guessit/rules/properties/date.py b/lib/guessit/rules/properties/date.py new file mode 100644 index 00000000..e50cdfa3 --- /dev/null +++ b/lib/guessit/rules/properties/date.py @@ -0,0 
+1,84 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +date and year properties +""" +from rebulk import Rebulk, RemoveMatch, Rule + +from ..common.date import search_date, valid_year +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def date(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk().defaults(validator=seps_surround) + + rebulk.regex(r"\d{4}", name="year", formatter=int, + disabled=lambda context: is_disabled(context, 'year'), + conflict_solver=lambda match, other: other + if other.name in ('episode', 'season') and len(other.raw) < len(match.raw) + else '__default__', + validator=lambda match: seps_surround(match) and valid_year(match.value)) + + def date_functional(string, context): # pylint:disable=inconsistent-return-statements + """ + Search for date in the string and retrieves match + + :param string: + :return: + """ + + ret = search_date(string, context.get('date_year_first'), context.get('date_day_first')) + if ret: + return ret[0], ret[1], {'value': ret[2]} + + rebulk.functional(date_functional, name="date", properties={'date': [None]}, + disabled=lambda context: is_disabled(context, 'date'), + conflict_solver=lambda match, other: other + if other.name in ('episode', 'season', 'crc32') + else '__default__') + + rebulk.rules(KeepMarkedYearInFilepart) + + return rebulk + + +class KeepMarkedYearInFilepart(Rule): + """ + Keep first years marked with [](){} in filepart, or if no year is marked, ensure it won't override titles. 
+ """ + priority = 64 + consequence = RemoveMatch + + def enabled(self, context): + return not is_disabled(context, 'year') + + def when(self, matches, context): + ret = [] + if len(matches.named('year')) > 1: + for filepart in matches.markers.named('path'): + years = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year') + if len(years) > 1: + group_years = [] + ungroup_years = [] + for year in years: + if matches.markers.at_match(year, lambda marker: marker.name == 'group'): + group_years.append(year) + else: + ungroup_years.append(year) + if group_years and ungroup_years: + ret.extend(ungroup_years) + ret.extend(group_years[1:]) # Keep the first year in marker. + elif not group_years: + ret.append(ungroup_years[0]) # Keep first year for title. + if len(ungroup_years) > 2: + ret.extend(ungroup_years[2:]) + return ret diff --git a/lib/guessit/rules/properties/edition.py b/lib/guessit/rules/properties/edition.py new file mode 100644 index 00000000..822aa4ee --- /dev/null +++ b/lib/guessit/rules/properties/edition.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +edition property +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common import dash +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def edition(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'edition')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + rebulk.defaults(name='edition', validator=seps_surround) + + rebulk.regex('collector', "collector'?s?-edition", 'edition-collector', value='Collector') + rebulk.regex('special-edition', 'edition-special', value='Special', + conflict_solver=lambda match, other: other + if other.name == 'episode_details' and other.value == 'Special' + else '__default__') + rebulk.string('se', value='Special', tags='has-neighbor') + rebulk.string('ddc', value="Director's Definitive Cut") + rebulk.regex('criterion-edition', 'edition-criterion', 'CC', value='Criterion') + rebulk.regex('deluxe', 'deluxe-edition', 'edition-deluxe', value='Deluxe') + rebulk.regex('limited', 'limited-edition', value='Limited', tags=['has-neighbor', 'release-group-prefix']) + rebulk.regex(r'theatrical-cut', r'theatrical-edition', r'theatrical', value='Theatrical') + rebulk.regex(r"director'?s?-cut", r"director'?s?-cut-edition", r"edition-director'?s?-cut", 'DC', + value="Director's Cut") + rebulk.regex('extended', 'extended-?cut', 'extended-?version', + value='Extended', tags=['has-neighbor', 'release-group-prefix']) + rebulk.regex('alternat(e|ive)(?:-?Cut)?', value='Alternative Cut', tags=['has-neighbor', 'release-group-prefix']) + for value in ('Remastered', 'Uncensored', 'Uncut', 'Unrated'): + rebulk.string(value, value=value, tags=['has-neighbor', 'release-group-prefix']) + rebulk.string('Festival', value='Festival', tags=['has-neighbor-before', 'has-neighbor-after']) + rebulk.regex('imax', 'imax-edition', value='IMAX') + rebulk.regex('fan-edit(?:ion)?', 'fan-collection', value='Fan') + rebulk.regex('ultimate-edition', value='Ultimate') + rebulk.regex("ultimate-collector'?s?-edition", value=['Ultimate', 
'Collector']) + rebulk.regex('ultimate-fan-edit(?:ion)?', 'ultimate-fan-collection', value=['Ultimate', 'Fan']) + + return rebulk diff --git a/lib/guessit/rules/properties/episode_title.py b/lib/guessit/rules/properties/episode_title.py new file mode 100644 index 00000000..ece8921d --- /dev/null +++ b/lib/guessit/rules/properties/episode_title.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Episode title +""" +from collections import defaultdict + +from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch, RenameMatch, POST_PROCESS + +from ..common import seps, title_seps +from ..common.formatters import cleanup +from ..common.pattern import is_disabled +from ..common.validators import or_ +from ..properties.title import TitleFromPosition, TitleBaseRule +from ..properties.type import TypeProcessor + + +def episode_title(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + previous_names = ('episode', 'episode_count', + 'season', 'season_count', 'date', 'title', 'year') + + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'episode_title')) + rebulk = rebulk.rules(RemoveConflictsWithEpisodeTitle(previous_names), + EpisodeTitleFromPosition(previous_names), + AlternativeTitleReplace(previous_names), + TitleToEpisodeTitle, + Filepart3EpisodeTitle, + Filepart2EpisodeTitle, + RenameEpisodeTitleWhenMovieType) + return rebulk + + +class RemoveConflictsWithEpisodeTitle(Rule): + """ + Remove conflicting matches that might lead to wrong episode_title parsing. 
+ """ + + priority = 64 + consequence = RemoveMatch + + def __init__(self, previous_names): + super(RemoveConflictsWithEpisodeTitle, self).__init__() + self.previous_names = previous_names + self.next_names = ('streaming_service', 'screen_size', 'source', + 'video_codec', 'audio_codec', 'other', 'container') + self.affected_if_holes_after = ('part', ) + self.affected_names = ('part', 'year') + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + for match in matches.range(filepart.start, filepart.end, + predicate=lambda m: m.name in self.affected_names): + before = matches.range(filepart.start, match.start, predicate=lambda m: not m.private, index=-1) + if not before or before.name not in self.previous_names: + continue + + after = matches.range(match.end, filepart.end, predicate=lambda m: not m.private, index=0) + if not after or after.name not in self.next_names: + continue + + group = matches.markers.at_match(match, predicate=lambda m: m.name == 'group', index=0) + + def has_value_in_same_group(current_match, current_group=group): + """Return true if current match has value and belongs to the current group.""" + return current_match.value.strip(seps) and ( + current_group == matches.markers.at_match(current_match, + predicate=lambda mm: mm.name == 'group', index=0) + ) + + holes_before = matches.holes(before.end, match.start, predicate=has_value_in_same_group) + holes_after = matches.holes(match.end, after.start, predicate=has_value_in_same_group) + + if not holes_before and not holes_after: + continue + + if match.name in self.affected_if_holes_after and not holes_after: + continue + + to_remove.append(match) + if match.parent: + to_remove.append(match.parent) + + return to_remove + + +class TitleToEpisodeTitle(Rule): + """ + If multiple different title are found, convert the one following episode number to episode_title. 
+ """ + dependency = TitleFromPosition + + def when(self, matches, context): + titles = matches.named('title') + title_groups = defaultdict(list) + for title in titles: + title_groups[title.value].append(title) + + episode_titles = [] + if len(title_groups) < 2: + return episode_titles + + for title in titles: + if matches.previous(title, lambda match: match.name == 'episode'): + episode_titles.append(title) + + return episode_titles + + def then(self, matches, when_response, context): + for title in when_response: + matches.remove(title) + title.name = 'episode_title' + matches.append(title) + + +class EpisodeTitleFromPosition(TitleBaseRule): + """ + Add episode title match in existing matches + Must run after TitleFromPosition rule. + """ + dependency = TitleToEpisodeTitle + + def __init__(self, previous_names): + super(EpisodeTitleFromPosition, self).__init__('episode_title', ['title']) + self.previous_names = previous_names + + def hole_filter(self, hole, matches): + episode = matches.previous(hole, + lambda previous: previous.named(*self.previous_names), + 0) + + crc32 = matches.named('crc32') + + return episode or crc32 + + def filepart_filter(self, filepart, matches): + # Filepart where title was found. + if matches.range(filepart.start, filepart.end, lambda match: match.name == 'title'): + return True + return False + + def should_remove(self, match, matches, filepart, hole, context): + if match.name == 'episode_details': + return False + return super(EpisodeTitleFromPosition, self).should_remove(match, matches, filepart, hole, context) + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + if matches.named('episode_title'): + return + return super(EpisodeTitleFromPosition, self).when(matches, context) + + +class AlternativeTitleReplace(Rule): + """ + If alternateTitle was found and title is next to episode, season or date, replace it with episode_title. 
+ """ + dependency = EpisodeTitleFromPosition + consequence = RenameMatch + + def __init__(self, previous_names): + super(AlternativeTitleReplace, self).__init__() + self.previous_names = previous_names + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + if matches.named('episode_title'): + return + + alternative_title = matches.range(predicate=lambda match: match.name == 'alternative_title', index=0) + if alternative_title: + main_title = matches.chain_before(alternative_title.start, seps=seps, + predicate=lambda match: 'title' in match.tags, index=0) + if main_title: + episode = matches.previous(main_title, + lambda previous: previous.named(*self.previous_names), + 0) + + crc32 = matches.named('crc32') + + if episode or crc32: + return alternative_title + + def then(self, matches, when_response, context): + matches.remove(when_response) + when_response.name = 'episode_title' + when_response.tags.append('alternative-replaced') + matches.append(when_response) + + +class RenameEpisodeTitleWhenMovieType(Rule): + """ + Rename episode_title by alternative_title when type is movie. 
+ """ + priority = POST_PROCESS + + dependency = TypeProcessor + consequence = RenameMatch + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + if matches.named('episode_title', lambda m: 'alternative-replaced' not in m.tags) \ + and not matches.named('type', lambda m: m.value == 'episode'): + return matches.named('episode_title') + + def then(self, matches, when_response, context): + for match in when_response: + matches.remove(match) + match.name = 'alternative_title' + matches.append(match) + + +class Filepart3EpisodeTitle(Rule): + """ + If we have at least 3 filepart structured like this: + + Serie name/SO1/E01-episode_title.mkv + AAAAAAAAAA/BBB/CCCCCCCCCCCCCCCCCCCC + + Serie name/SO1/episode_title-E01.mkv + AAAAAAAAAA/BBB/CCCCCCCCCCCCCCCCCCCC + + If CCCC contains episode and BBB contains seasonNumber + Then title is to be found in AAAA. + """ + consequence = AppendMatch('title') + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + if matches.tagged('filepart-title'): + return + + fileparts = matches.markers.named('path') + if len(fileparts) < 3: + return + + filename = fileparts[-1] + directory = fileparts[-2] + subdirectory = fileparts[-3] + + episode_number = matches.range(filename.start, filename.end, lambda match: match.name == 'episode', 0) + if episode_number: + season = matches.range(directory.start, directory.end, lambda match: match.name == 'season', 0) + + if season: + hole = matches.holes(subdirectory.start, subdirectory.end, + ignore=or_(lambda match: 'weak-episode' in match.tags, TitleBaseRule.is_ignored), + formatter=cleanup, seps=title_seps, predicate=lambda match: match.value, + index=0) + if hole: + return hole + + +class Filepart2EpisodeTitle(Rule): + """ + If we have at least 2 filepart structured like this: + + Serie name SO1/E01-episode_title.mkv + AAAAAAAAAAAAA/BBBBBBBBBBBBBBBBBBBBB + + If BBBB contains episode and AAA contains a hole followed by seasonNumber + then 
title is to be found in AAAA. + + or + + Serie name/SO1E01-episode_title.mkv + AAAAAAAAAA/BBBBBBBBBBBBBBBBBBBBB + + If BBBB contains season and episode and AAA contains a hole + then title is to be found in AAAA. + """ + consequence = AppendMatch('title') + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + if matches.tagged('filepart-title'): + return + + fileparts = matches.markers.named('path') + if len(fileparts) < 2: + return + + filename = fileparts[-1] + directory = fileparts[-2] + + episode_number = matches.range(filename.start, filename.end, lambda match: match.name == 'episode', 0) + if episode_number: + season = (matches.range(directory.start, directory.end, lambda match: match.name == 'season', 0) or + matches.range(filename.start, filename.end, lambda match: match.name == 'season', 0)) + if season: + hole = matches.holes(directory.start, directory.end, + ignore=or_(lambda match: 'weak-episode' in match.tags, TitleBaseRule.is_ignored), + formatter=cleanup, seps=title_seps, + predicate=lambda match: match.value, index=0) + if hole: + hole.tags.append('filepart-title') + return hole diff --git a/lib/guessit/rules/properties/episodes.py b/lib/guessit/rules/properties/episodes.py new file mode 100644 index 00000000..345c785d --- /dev/null +++ b/lib/guessit/rules/properties/episodes.py @@ -0,0 +1,912 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +episode, season, disc, episode_count, season_count and episode_details properties +""" +import copy +from collections import defaultdict + +from rebulk import Rebulk, RemoveMatch, Rule, AppendMatch, RenameMatch +from rebulk.match import Match +from rebulk.remodule import re +from rebulk.utils import is_iterable + +from guessit.rules import match_processors +from guessit.rules.common.numeral import parse_numeral, numeral +from .title import TitleFromPosition +from ..common import dash, alt_dash, seps, seps_no_fs +from ..common.formatters import strip +from ..common.pattern 
import is_disabled +from ..common.validators import seps_surround, int_coercable, and_ +from ...reutils import build_or_pattern + + +def episodes(config): + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + + # pylint: disable=too-many-branches,too-many-statements,too-many-locals + def is_season_episode_disabled(context): + """Whether season and episode rules should be enabled.""" + return is_disabled(context, 'episode') or is_disabled(context, 'season') + + def episodes_season_chain_breaker(matches): + """ + Break chains if there's more than 100 offset between two neighbor values. + :param matches: + :type matches: + :return: + :rtype: + """ + eps = matches.named('episode') + if len(eps) > 1 and abs(eps[-1].value - eps[-2].value) > episode_max_range: + return True + + seasons = matches.named('season') + if len(seasons) > 1 and abs(seasons[-1].value - seasons[-2].value) > season_max_range: + return True + return False + + def season_episode_conflict_solver(match, other): + """ + Conflict solver for episode/season patterns + + :param match: + :param other: + :return: + """ + if match.name != other.name: + if match.name == 'episode' and other.name == 'year': + return match + if match.name in ('season', 'episode'): + if other.name in ('video_codec', 'audio_codec', 'container', 'date'): + return match + if (other.name == 'audio_channels' and 'weak-audio_channels' not in other.tags + and not match.initiator.children.named(match.name + 'Marker')) or ( + other.name == 'screen_size' and not int_coercable(other.raw)): + return match + if other.name in ('season', 'episode') and match.initiator != other.initiator: + if (match.initiator.name in ('weak_episode', 'weak_duplicate') + and other.initiator.name in ('weak_episode', 'weak_duplicate')): + return '__default__' + for current in (match, other): + if 'weak-episode' in current.tags or 'x' in current.initiator.raw.lower(): + 
return current + return '__default__' + + def ordering_validator(match): + """ + Validator for season list. They should be in natural order to be validated. + + episode/season separated by a weak discrete separator should be consecutive, unless a strong discrete separator + or a range separator is present in the chain (1.3&5 is valid, but 1.3-5 is not valid and 1.3.5 is not valid) + """ + values = match.children.to_dict() + if 'season' in values and is_iterable(values['season']): + # Season numbers must be in natural order to be validated. + if not list(sorted(values['season'])) == values['season']: + return False + if 'episode' in values and is_iterable(values['episode']): + # Season numbers must be in natural order to be validated. + if not list(sorted(values['episode'])) == values['episode']: + return False + + def is_consecutive(property_name): + """ + Check if the property season or episode has valid consecutive values. + :param property_name: + :type property_name: + :return: + :rtype: + """ + previous_match = None + valid = True + for current_match in match.children.named(property_name): + if previous_match: + match.children.previous(current_match, + lambda m: m.name == property_name + 'Separator') + separator = match.children.previous(current_match, + lambda m: m.name == property_name + 'Separator', 0) + if separator: + if separator.raw not in range_separators and separator.raw in weak_discrete_separators: + if not 0 < current_match.value - previous_match.value <= max_range_gap + 1: + valid = False + if separator.raw in strong_discrete_separators: + valid = True + break + previous_match = current_match + return valid + + return is_consecutive('episode') and is_consecutive('season') + + def validate_roman(match): + """ + Validate a roman match if surrounded by separators + :param match: + :type match: + :return: + :rtype: + """ + if int_coercable(match.raw): + return True + return seps_surround(match) + + season_words = config['season_words'] + episode_words 
= config['episode_words'] + of_words = config['of_words'] + all_words = config['all_words'] + season_markers = config['season_markers'] + season_ep_markers = config['season_ep_markers'] + disc_markers = config['disc_markers'] + episode_markers = config['episode_markers'] + range_separators = config['range_separators'] + weak_discrete_separators = list(sep for sep in seps_no_fs if sep not in range_separators) + strong_discrete_separators = config['discrete_separators'] + discrete_separators = strong_discrete_separators + weak_discrete_separators + episode_max_range = config['episode_max_range'] + season_max_range = config['season_max_range'] + max_range_gap = config['max_range_gap'] + + rebulk = Rebulk() \ + .regex_defaults(flags=re.IGNORECASE) \ + .string_defaults(ignore_case=True) \ + .chain_defaults(chain_breaker=episodes_season_chain_breaker) \ + .defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'], + formatter={'season': int, 'episode': int, 'version': int, 'count': int}, + children=True, + private_parent=True, + conflict_solver=season_episode_conflict_solver, + abbreviations=[alt_dash]) + + # S01E02, 01x02, S01S02S03 + rebulk.chain( + tags=['SxxExx'], + validate_all=True, + validator={'__parent__': and_(seps_surround, ordering_validator)}, + disabled=is_season_episode_disabled) \ + .defaults(tags=['SxxExx']) \ + .regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)@?' + + build_or_pattern(episode_markers + disc_markers, name='episodeMarker') + r'@?(?P<episode>\d+)')\ + .repeater('+') \ + .regex(build_or_pattern(episode_markers + disc_markers + discrete_separators + range_separators, + name='episodeSeparator', + escape=True) + + r'(?P<episode>\d+)').repeater('*') + + rebulk.chain(tags=['SxxExx'], + validate_all=True, + validator={'__parent__': and_(seps_surround, ordering_validator)}, + disabled=is_season_episode_disabled) \ + .defaults(tags=['SxxExx']) \ + .regex(r'(?P<season>\d+)@?' 
+ + build_or_pattern(season_ep_markers, name='episodeMarker') + + r'@?(?P<episode>\d+)').repeater('+') \ + + rebulk.chain(tags=['SxxExx'], + validate_all=True, + validator={'__parent__': and_(seps_surround, ordering_validator)}, + disabled=is_season_episode_disabled) \ + .defaults(tags=['SxxExx']) \ + .regex(r'(?P<season>\d+)@?' + + build_or_pattern(season_ep_markers, name='episodeMarker') + + r'@?(?P<episode>\d+)') \ + .regex(build_or_pattern(season_ep_markers + discrete_separators + range_separators, + name='episodeSeparator', + escape=True) + + r'(?P<episode>\d+)').repeater('*') + + rebulk.chain(tags=['SxxExx'], + validate_all=True, + validator={'__parent__': and_(seps_surround, ordering_validator)}, + disabled=is_season_episode_disabled) \ + .defaults(tags=['SxxExx']) \ + .regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)') \ + .regex('(?P<other>Extras)', name='other', value='Extras', tags=['no-release-group-prefix']).repeater('?') \ + .regex(build_or_pattern(season_markers + discrete_separators + range_separators, + name='seasonSeparator', + escape=True) + + r'(?P<season>\d+)').repeater('*') + + # episode_details property + for episode_detail in ('Special', 'Pilot', 'Unaired', 'Final'): + rebulk.string(episode_detail, + private_parent=False, + children=False, + value=episode_detail, + name='episode_details', + disabled=lambda context: is_disabled(context, 'episode_details')) + + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'], + validate_all=True, + validator={'__parent__': and_(seps_surround, ordering_validator)}, + children=True, + private_parent=True, + conflict_solver=season_episode_conflict_solver) + + rebulk.chain(validate_all=True, + conflict_solver=season_episode_conflict_solver, + formatter={'season': parse_numeral, 'count': parse_numeral}, + validator={'__parent__': and_(seps_surround, ordering_validator), + 'season': validate_roman, + 'count': validate_roman}, + 
disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'season')) \ + .defaults(formatter={'season': parse_numeral, 'count': parse_numeral}, + validator={'season': validate_roman, 'count': validate_roman}, + conflict_solver=season_episode_conflict_solver) \ + .regex(build_or_pattern(season_words, name='seasonMarker') + '@?(?P<season>' + numeral + ')') \ + .regex(r'' + build_or_pattern(of_words) + '@?(?P<count>' + numeral + ')').repeater('?') \ + .regex(r'@?' + build_or_pattern(range_separators + discrete_separators + ['@'], + name='seasonSeparator', escape=True) + + r'@?(?P<season>\d+)').repeater('*') + + rebulk.defaults(abbreviations=[dash]) + + rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>\d+)' + + r'(?:v(?P<version>\d+))?' + + r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4 + disabled=lambda context: context.get('type') == 'episode' or is_disabled(context, 'episode')) + + rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>' + numeral + ')' + + r'(?:v(?P<version>\d+))?' + + r'(?:-?' 
+ build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4 + validator={'episode': validate_roman}, + formatter={'episode': parse_numeral}, + disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode')) + + rebulk.regex(r'S?(?P<season>\d+)-?(?:xE|Ex|E|x)-?(?P<other>' + build_or_pattern(all_words) + ')', + tags=['SxxExx'], + formatter={'other': lambda match: 'Complete'}, + disabled=lambda context: is_disabled(context, 'season')) + + # 12, 13 + rebulk.chain(tags=['weak-episode'], + disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \ + .defaults(validator=None, tags=['weak-episode']) \ + .regex(r'(?P<episode>\d{2})') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{2})', abbreviations=None).repeater('*') + + # 012, 013 + rebulk.chain(tags=['weak-episode'], + disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \ + .defaults(validator=None, tags=['weak-episode']) \ + .regex(r'0(?P<episode>\d{1,2})') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>[x-])0(?P<episode>\d{1,2})', abbreviations=None).repeater('*') + + # 112, 113 + rebulk.chain(tags=['weak-episode'], + name='weak_episode', + disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \ + .defaults(validator=None, tags=['weak-episode'], name='weak_episode') \ + .regex(r'(?P<episode>\d{3,4})') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{3,4})', abbreviations=None).repeater('*') + + # 1, 2, 3 + rebulk.chain(tags=['weak-episode'], + disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode')) \ + .defaults(validator=None, tags=['weak-episode']) \ + .regex(r'(?P<episode>\d)') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{1,2})', 
abbreviations=None).repeater('*') + + # e112, e113, 1e18, 3e19 + rebulk.chain(disabled=lambda context: is_disabled(context, 'episode')) \ + .defaults(validator=None) \ + .regex(r'(?P<season>\d{1,2})?(?P<episodeMarker>e)(?P<episode>\d{1,4})') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})', abbreviations=None).repeater('*') + + # ep 112, ep113, ep112, ep113 + rebulk.chain(disabled=lambda context: is_disabled(context, 'episode')) \ + .defaults(validator=None) \ + .regex(r'ep-?(?P<episode>\d{1,4})') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>ep|e|x|-)(?P<episode>\d{1,4})', abbreviations=None).repeater('*') + + # cap 112, cap 112_114 + rebulk.chain(tags=['see-pattern'], + disabled=is_season_episode_disabled) \ + .defaults(validator=None, tags=['see-pattern']) \ + .regex(r'(?P<seasonMarker>cap)-?(?P<season>\d{1,2})(?P<episode>\d{2})') \ + .regex(r'(?P<episodeSeparator>-)(?P<season>\d{1,2})(?P<episode>\d{2})').repeater('?') + + # 102, 0102 + rebulk.chain(tags=['weak-episode', 'weak-duplicate'], + name='weak_duplicate', + conflict_solver=season_episode_conflict_solver, + disabled=lambda context: (context.get('episode_prefer_number', False) or + context.get('type') == 'movie') or is_season_episode_disabled(context)) \ + .defaults(tags=['weak-episode', 'weak-duplicate'], + name='weak_duplicate', + validator=None, + conflict_solver=season_episode_conflict_solver) \ + .regex(r'(?P<season>\d{1,2})(?P<episode>\d{2})') \ + .regex(r'v(?P<version>\d+)').repeater('?') \ + .regex(r'(?P<episodeSeparator>x|-)(?P<episode>\d{2})', abbreviations=None).repeater('*') + + rebulk.regex(r'v(?P<version>\d+)', + formatter=int, + disabled=lambda context: is_disabled(context, 'version')) + + rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator']) + + # TODO: List of words + # detached of X count (season/episode) + rebulk.regex(r'(?P<episode>\d+)-?' 
+ build_or_pattern(of_words) + + r'-?(?P<count>\d+)-?' + build_or_pattern(episode_words) + '?', + formatter=int, + pre_match_processor=match_processors.strip, + disabled=lambda context: is_disabled(context, 'episode')) + + rebulk.regex(r'Minisodes?', + children=False, + private_parent=False, + name='episode_format', + value="Minisode", + disabled=lambda context: is_disabled(context, 'episode_format')) + + rebulk.rules(WeakConflictSolver, RemoveInvalidSeason, RemoveInvalidEpisode, + SeePatternRange(range_separators + ['_']), + EpisodeNumberSeparatorRange(range_separators), + SeasonSeparatorRange(range_separators), RemoveWeakIfMovie, RemoveWeakIfSxxExx, RemoveWeakDuplicate, + EpisodeDetailValidator, RemoveDetachedEpisodeNumber, VersionValidator, RemoveWeak(episode_words), + RenameToAbsoluteEpisode, CountValidator, EpisodeSingleDigitValidator, RenameToDiscMatch) + + return rebulk + + +class WeakConflictSolver(Rule): + """ + Rule to decide whether weak-episode or weak-duplicate matches should be kept. + + If an anime is detected: + - weak-duplicate matches should be removed + - weak-episode matches should be tagged as anime + Otherwise: + - weak-episode matches are removed unless they're part of an episode range match. + """ + priority = 128 + consequence = [RemoveMatch, AppendMatch] + + def enabled(self, context): + return context.get('type') != 'movie' + + @classmethod + def is_anime(cls, matches): + """Return True if it seems to be an anime. 
+ + Anime characteristics: + - version, crc32 matches + - screen_size inside brackets + - release_group at start and inside brackets + """ + if matches.named('version') or matches.named('crc32'): + return True + + for group in matches.markers.named('group'): + if matches.range(group.start, group.end, predicate=lambda m: m.name == 'screen_size'): + return True + if matches.markers.starting(group.start, predicate=lambda m: m.name == 'path'): + hole = matches.holes(group.start, group.end, index=0) + if hole and hole.raw == group.raw: + return True + + return False + + def when(self, matches, context): + to_remove = [] + to_append = [] + anime_detected = self.is_anime(matches) + for filepart in matches.markers.named('path'): + weak_matches = matches.range(filepart.start, filepart.end, predicate=( + lambda m: m.initiator.name == 'weak_episode')) + weak_dup_matches = matches.range(filepart.start, filepart.end, predicate=( + lambda m: m.initiator.name == 'weak_duplicate')) + if anime_detected: + if weak_matches: + to_remove.extend(weak_dup_matches) + for match in matches.range(filepart.start, filepart.end, predicate=( + lambda m: m.name == 'episode' and m.initiator.name != 'weak_duplicate')): + episode = copy.copy(match) + episode.tags = episode.tags + ['anime'] + to_append.append(episode) + to_remove.append(match) + elif weak_dup_matches: + episodes_in_range = matches.range(filepart.start, filepart.end, predicate=( + lambda m: + m.name == 'episode' and m.initiator.name == 'weak_episode' + and m.initiator.children.named('episodeSeparator') + )) + if not episodes_in_range and not matches.range(filepart.start, filepart.end, + predicate=lambda m: 'SxxExx' in m.tags): + to_remove.extend(weak_matches) + else: + for match in episodes_in_range: + episode = copy.copy(match) + episode.tags = [] + to_append.append(episode) + to_remove.append(match) + + if to_append: + to_remove.extend(weak_dup_matches) + + if to_remove or to_append: + return to_remove, to_append + return False + + 
+class CountValidator(Rule): + """ + Validate count property and rename it + """ + priority = 64 + consequence = [RemoveMatch, RenameMatch('episode_count'), RenameMatch('season_count')] + + properties = {'episode_count': [None], 'season_count': [None]} + + def when(self, matches, context): + to_remove = [] + episode_count = [] + season_count = [] + + for count in matches.named('count'): + previous = matches.previous(count, lambda match: match.name in ['episode', 'season'], 0) + if previous: + if previous.name == 'episode': + episode_count.append(count) + elif previous.name == 'season': + season_count.append(count) + else: + to_remove.append(count) + if to_remove or episode_count or season_count: + return to_remove, episode_count, season_count + return False + + +class SeePatternRange(Rule): + """ + Create matches for episode range for SEE pattern. E.g.: Cap.102_104 + """ + priority = 128 + consequence = [RemoveMatch, AppendMatch] + + def __init__(self, range_separators): + super(SeePatternRange, self).__init__() + self.range_separators = range_separators + + def when(self, matches, context): + to_remove = [] + to_append = [] + + for separator in matches.tagged('see-pattern', lambda m: m.name == 'episodeSeparator'): + previous_match = matches.previous(separator, lambda m: m.name == 'episode' and 'see-pattern' in m.tags, 0) + next_match = matches.next(separator, lambda m: m.name == 'season' and 'see-pattern' in m.tags, 0) + if not next_match: + continue + + next_match = matches.next(next_match, lambda m: m.name == 'episode' and 'see-pattern' in m.tags, 0) + if previous_match and next_match and separator.value in self.range_separators: + to_remove.append(next_match) + + for episode_number in range(previous_match.value + 1, next_match.value + 1): + match = copy.copy(next_match) + match.value = episode_number + to_append.append(match) + + to_remove.append(separator) + + if to_remove or to_append: + return to_remove, to_append + return False + + +class 
AbstractSeparatorRange(Rule): + """ + Remove separator matches and create matches for season range. + """ + priority = 128 + consequence = [RemoveMatch, AppendMatch] + + def __init__(self, range_separators, property_name): + super(AbstractSeparatorRange, self).__init__() + self.range_separators = range_separators + self.property_name = property_name + + def when(self, matches, context): + to_remove = [] + to_append = [] + + for separator in matches.named(self.property_name + 'Separator'): + previous_match = matches.previous(separator, lambda m: m.name == self.property_name, 0) + next_match = matches.next(separator, lambda m: m.name == self.property_name, 0) + initiator = separator.initiator + + if previous_match and next_match and separator.value in self.range_separators: + to_remove.append(next_match) + for episode_number in range(previous_match.value + 1, next_match.value): + match = copy.copy(next_match) + match.value = episode_number + initiator.children.append(match) + to_append.append(match) + to_append.append(next_match) + to_remove.append(separator) + + previous_match = None + for next_match in matches.named(self.property_name): + if previous_match: + separator = matches.input_string[previous_match.initiator.end:next_match.initiator.start] + if separator not in self.range_separators: + separator = strip(separator) + if separator in self.range_separators: + initiator = previous_match.initiator + for episode_number in range(previous_match.value + 1, next_match.value): + match = copy.copy(next_match) + match.value = episode_number + initiator.children.append(match) + to_append.append(match) + to_append.append(Match(previous_match.end, next_match.start - 1, + name=self.property_name + 'Separator', + private=True, + input_string=matches.input_string)) + to_remove.append(next_match) # Remove and append match to support proper ordering + to_append.append(next_match) + + previous_match = next_match + + if to_remove or to_append: + return to_remove, to_append + 
return False + + +class RenameToAbsoluteEpisode(Rule): + """ + Rename episode to absolute_episodes. + + Absolute episodes are only used if two groups of episodes are detected: + S02E04-06 25-27 + 25-27 S02E04-06 + 2x04-06 25-27 + 28. Anime Name S02E05 + The matches in the group with higher episode values are renamed to absolute_episode. + """ + + consequence = RenameMatch('absolute_episode') + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + initiators = {match.initiator for match in matches.named('episode') + if len(match.initiator.children.named('episode')) > 1} + if len(initiators) != 2: + ret = [] + for filepart in matches.markers.named('path'): + if matches.range(filepart.start + 1, filepart.end, predicate=lambda m: m.name == 'episode'): + ret.extend( + matches.starting(filepart.start, predicate=lambda m: m.initiator.name == 'weak_episode')) + return ret + + initiators = sorted(initiators, key=lambda item: item.end) + if not matches.holes(initiators[0].end, initiators[1].start, predicate=lambda m: m.raw.strip(seps)): + first_range = matches.named('episode', predicate=lambda m: m.initiator == initiators[0]) + second_range = matches.named('episode', predicate=lambda m: m.initiator == initiators[1]) + if len(first_range) == len(second_range): + if second_range[0].value > first_range[0].value: + return second_range + if first_range[0].value > second_range[0].value: + return first_range + + +class EpisodeNumberSeparatorRange(AbstractSeparatorRange): + """ + Remove separator matches and create matches for episoderNumber range. + """ + + def __init__(self, range_separators): + super(EpisodeNumberSeparatorRange, self).__init__(range_separators, "episode") + + +class SeasonSeparatorRange(AbstractSeparatorRange): + """ + Remove separator matches and create matches for season range. 
+ """ + + def __init__(self, range_separators): + super(SeasonSeparatorRange, self).__init__(range_separators, "season") + + +class RemoveWeakIfMovie(Rule): + """ + Remove weak-episode tagged matches if it seems to be a movie. + """ + priority = 64 + consequence = RemoveMatch + + def enabled(self, context): + return context.get('type') != 'episode' + + def when(self, matches, context): + to_remove = [] + to_ignore = set() + remove = False + for filepart in matches.markers.named('path'): + year = matches.range(filepart.start, filepart.end, predicate=lambda m: m.name == 'year', index=0) + if year: + remove = True + next_match = matches.range(year.end, filepart.end, predicate=lambda m: m.private, index=0) + if (next_match and not matches.holes(year.end, next_match.start, predicate=lambda m: m.raw.strip(seps)) + and not matches.at_match(next_match, predicate=lambda m: m.name == 'year')): + to_ignore.add(next_match.initiator) + + to_ignore.update(matches.range(filepart.start, filepart.end, + predicate=lambda m: len(m.children.named('episode')) > 1)) + + to_remove.extend(matches.conflicting(year)) + if remove: + to_remove.extend(matches.tagged('weak-episode', predicate=( + lambda m: m.initiator not in to_ignore and 'anime' not in m.tags))) + + return to_remove + + +class RemoveWeak(Rule): + """ + Remove weak-episode matches which appears after video, source, and audio matches. 
+ """ + priority = 16 + consequence = RemoveMatch, AppendMatch + + def __init__(self, episode_words): + super(RemoveWeak, self).__init__() + self.episode_words = episode_words + + def when(self, matches, context): + to_remove = [] + to_append = [] + for filepart in matches.markers.named('path'): + weaks = matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-episode' in m.tags) + if weaks: + weak = weaks[0] + previous = matches.previous(weak, predicate=lambda m: m.name in ( + 'audio_codec', 'screen_size', 'streaming_service', 'source', 'video_profile', + 'audio_channels', 'audio_profile'), index=0) + if previous and not matches.holes( + previous.end, weak.start, predicate=lambda m: m.raw.strip(seps)): + if previous.raw.lower() in self.episode_words: + try: + episode = copy.copy(weak) + episode.name = 'episode' + episode.value = int(weak.value) + episode.start = previous.start + episode.private = False + episode.tags = [] + + to_append.append(episode) + except ValueError: + pass + + to_remove.extend(weaks) + if to_remove or to_append: + return to_remove, to_append + return False + + +class RemoveWeakIfSxxExx(Rule): + """ + Remove weak-episode tagged matches if SxxExx pattern is matched. + + Weak episodes at beginning of filepart are kept. + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + if matches.range(filepart.start, filepart.end, + predicate=lambda m: not m.private and 'SxxExx' in m.tags): + for match in matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-episode' in m.tags): + if match.start != filepart.start or match.initiator.name != 'weak_episode': + to_remove.append(match) + return to_remove + + +class RemoveInvalidSeason(Rule): + """ + Remove invalid season matches. 
+ """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + strong_season = matches.range(filepart.start, filepart.end, index=0, + predicate=lambda m: m.name == 'season' + and not m.private and 'SxxExx' in m.tags) + if strong_season: + if strong_season.initiator.children.named('episode'): + for season in matches.range(strong_season.end, filepart.end, + predicate=lambda m: m.name == 'season' and not m.private): + # remove weak season or seasons without episode matches + if 'SxxExx' not in season.tags or not season.initiator.children.named('episode'): + if season.initiator: + to_remove.append(season.initiator) + to_remove.extend(season.initiator.children) + else: + to_remove.append(season) + + return to_remove + + +class RemoveInvalidEpisode(Rule): + """ + Remove invalid episode matches. + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + strong_episode = matches.range(filepart.start, filepart.end, index=0, + predicate=lambda m: m.name == 'episode' + and not m.private and 'SxxExx' in m.tags) + if strong_episode: + strong_ep_marker = RemoveInvalidEpisode.get_episode_prefix(matches, strong_episode) + for episode in matches.range(strong_episode.end, filepart.end, + predicate=lambda m: m.name == 'episode' and not m.private): + ep_marker = RemoveInvalidEpisode.get_episode_prefix(matches, episode) + if strong_ep_marker and ep_marker and strong_ep_marker.value.lower() != ep_marker.value.lower(): + if episode.initiator: + to_remove.append(episode.initiator) + to_remove.extend(episode.initiator.children) + else: + to_remove.append(ep_marker) + to_remove.append(episode) + + return to_remove + + @staticmethod + def get_episode_prefix(matches, episode): + """ + Return episode prefix: episodeMarker or episodeSeparator + """ + return matches.previous(episode, index=0, + 
class RemoveWeakDuplicate(Rule):
    """
    Remove 'weak-duplicate' tagged matches when the same pattern already
    matched the same property name, for example "The 100.109".

    Iteration runs backwards so the LAST occurrence of each (name, pattern)
    pair is the one that survives.
    """
    priority = 64
    consequence = RemoveMatch

    def when(self, matches, context):
        duplicates = []
        for part in matches.markers.named('path'):
            seen_patterns = defaultdict(list)
            candidates = matches.range(part.start, part.end,
                                       predicate=lambda m: 'weak-duplicate' in m.tags)
            for candidate in reversed(candidates):
                known = seen_patterns[candidate.name]
                if candidate.pattern in known:
                    duplicates.append(candidate)
                else:
                    known.append(candidate.pattern)
        return duplicates
+ """ + priority = 64 + consequence = RemoveMatch + dependency = [RemoveWeakIfSxxExx, RemoveWeakDuplicate] + + def when(self, matches, context): + ret = [] + + episode_numbers = [] + episode_values = set() + for match in matches.named('episode', lambda m: not m.private and 'weak-episode' in m.tags): + if match.value not in episode_values: + episode_numbers.append(match) + episode_values.add(match.value) + + episode_numbers = list(sorted(episode_numbers, key=lambda m: m.value)) + if len(episode_numbers) > 1 and \ + episode_numbers[0].value < 10 and \ + episode_numbers[1].value - episode_numbers[0].value != 1: + parent = episode_numbers[0] + while parent: # TODO: Add a feature in rebulk to avoid this ... + ret.append(parent) + parent = parent.parent + return ret + + +class VersionValidator(Rule): + """ + Validate version if previous match is episode or if surrounded by separators. + """ + priority = 64 + dependency = [RemoveWeakIfMovie, RemoveWeakIfSxxExx] + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for version in matches.named('version'): + episode_number = matches.previous(version, lambda match: match.name == 'episode', 0) + if not episode_number and not seps_surround(version.initiator): + ret.append(version) + return ret + + +class EpisodeSingleDigitValidator(Rule): + """ + Remove single digit episode when inside a group that doesn't own title. + """ + dependency = [TitleFromPosition] + + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for episode in matches.named('episode', lambda match: len(match.initiator) == 1): + group = matches.markers.at_match(episode, lambda marker: marker.name == 'group', index=0) + if group: + if not matches.range(*group.span, predicate=lambda match: match.name == 'title'): + ret.append(episode) + return ret + + +class RenameToDiscMatch(Rule): + """ + Rename episodes detected with `d` episodeMarkers to `disc`. 
+ """ + + consequence = [RenameMatch('disc'), RenameMatch('discMarker'), RemoveMatch] + + def when(self, matches, context): + discs = [] + markers = [] + to_remove = [] + + disc_disabled = is_disabled(context, 'disc') + + for marker in matches.named('episodeMarker', predicate=lambda m: m.value.lower() == 'd'): + if disc_disabled: + to_remove.append(marker) + to_remove.extend(marker.initiator.children) + continue + + markers.append(marker) + discs.extend(sorted(marker.initiator.children.named('episode'), key=lambda m: m.value)) + + if discs or markers or to_remove: + return discs, markers, to_remove + return False diff --git a/lib/guessit/rules/properties/film.py b/lib/guessit/rules/properties/film.py new file mode 100644 index 00000000..3c7e6c0f --- /dev/null +++ b/lib/guessit/rules/properties/film.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +film property +""" +from rebulk import Rebulk, AppendMatch, Rule +from rebulk.remodule import re + +from ..common.formatters import cleanup +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def film(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
class FilmTitleRule(Rule):
    """
    Append a ``film_title`` match built from the hole found before the film
    property inside the same path marker.
    """
    consequence = AppendMatch

    properties = {'film_title': [None]}

    def enabled(self, context):
        return not is_disabled(context, 'film_title')

    def when(self, matches, context):  # pylint:disable=inconsistent-return-statements
        film_match = matches.named('film', lambda match: not match.private, index=0)
        if not film_match:
            return
        path_marker = matches.markers.at_match(
            film_match, lambda marker: marker.name == 'path', 0)
        title_hole = matches.holes(
            path_marker.start, film_match.start + 1, formatter=cleanup, index=0)
        if title_hole and title_hole.value:
            title_hole.name = 'film_title'
            return title_hole
def language(config, common_words):
    """
    Builder for rebulk object.

    :param config: rule configuration
    :type config: dict
    :param common_words: common words
    :type common_words: set
    :return: Created Rebulk object
    :rtype: Rebulk
    """
    # Affixes listed in the '*_affixes' keys act as both prefixes and suffixes.
    subtitle_both = config['subtitle_affixes']
    subtitle_prefixes = sorted(subtitle_both + config['subtitle_prefixes'], key=length_comparator)
    subtitle_suffixes = sorted(subtitle_both + config['subtitle_suffixes'], key=length_comparator)
    lang_both = config['language_affixes']
    lang_prefixes = sorted(lang_both + config['language_prefixes'], key=length_comparator)
    lang_suffixes = sorted(lang_both + config['language_suffixes'], key=length_comparator)
    # Affixes too weak to imply a language on their own.
    weak_affixes = frozenset(config['weak_affixes'])

    rebulk = Rebulk(disabled=lambda context: (is_disabled(context, 'language') and
                                              is_disabled(context, 'subtitle_language')))

    rebulk.string(*subtitle_prefixes, name="subtitle_language.prefix", ignore_case=True, private=True,
                  validator=seps_surround, tags=['release-group-prefix'],
                  disabled=lambda context: is_disabled(context, 'subtitle_language'))
    rebulk.string(*subtitle_suffixes, name="subtitle_language.suffix", ignore_case=True, private=True,
                  validator=seps_surround,
                  disabled=lambda context: is_disabled(context, 'subtitle_language'))
    rebulk.string(*lang_suffixes, name="language.suffix", ignore_case=True, private=True,
                  validator=seps_surround, tags=['source-suffix'],
                  disabled=lambda context: is_disabled(context, 'language'))

    def find_languages(string, context=None):
        """Find languages in the string

        :return: list of tuple (property, Language, lang_word, word)
        """
        return LanguageFinder(context, subtitle_prefixes, subtitle_suffixes,
                              lang_prefixes, lang_suffixes, weak_affixes).find(string)

    # Language detection only runs when the caller whitelists languages.
    rebulk.functional(find_languages,
                      properties={'language': [None]},
                      disabled=lambda context: not context.get('allowed_languages'))
    rebulk.rules(SubtitleExtensionRule,
                 SubtitlePrefixLanguageRule,
                 SubtitleSuffixLanguageRule,
                 RemoveLanguage,
                 RemoveInvalidLanguages(common_words))

    # Register the custom converter so babelfish.Language.fromguessit works.
    babelfish.language_converters['guessit'] = GuessitConverter(config['synonyms'])

    return rebulk
class GuessitConverter(babelfish.LanguageReverseConverter):  # pylint: disable=missing-docstring
    _with_country_regexp = re.compile(r'(.*)\((.*)\)')
    _with_country_regexp2 = re.compile(r'(.*)-(.*)')

    def __init__(self, synonyms):
        self.guessit_exceptions = {}
        for code, synlist in synonyms.items():
            # Codes of the form 'alpha3_COUNTRY' carry an explicit country part.
            alpha3, country = code.split('_') if '_' in code else (code, None)
            for syn in synlist:
                self.guessit_exceptions[syn.lower()] = (alpha3, country, None)

    @property
    def codes(self):  # pylint: disable=missing-docstring
        all_codes = frozenset(self.guessit_exceptions.keys())
        all_codes |= babelfish.language_converters['alpha3b'].codes
        all_codes |= babelfish.language_converters['alpha2'].codes
        all_codes |= babelfish.language_converters['name'].codes
        all_codes |= babelfish.language_converters['opensubtitles'].codes
        all_codes |= babelfish.country_converters['name'].codes
        return all_codes

    def convert(self, alpha3, country=None, script=None):
        return str(babelfish.Language(alpha3, country, script))

    def reverse(self, name):  # pylint:disable=arguments-differ
        name = name.lower()
        # Synonym exceptions win over every other guesser.
        if name in self.guessit_exceptions:
            return self.guessit_exceptions[name]

        for parse in (babelfish.Language,
                      babelfish.Language.fromalpha3b,
                      babelfish.Language.fromalpha2,
                      babelfish.Language.fromname,
                      babelfish.Language.fromopensubtitles,
                      babelfish.Language.fromietf):
            try:
                parsed = parse(name)
                return parsed.alpha3, parsed.country, parsed.script
            except (ValueError, babelfish.LanguageReverseError):
                pass

        raise babelfish.LanguageReverseError(name)
length. + """ + return len(value) + + +_LanguageMatch = namedtuple('_LanguageMatch', ['property_name', 'word', 'lang']) + + +class LanguageWord(object): + """ + Extension to the Word namedtuple in order to create compound words. + + E.g.: pt-BR, soft subtitles, custom subs + """ + + def __init__(self, start, end, value, input_string, next_word=None): + self.start = start + self.end = end + self.value = value + self.input_string = input_string + self.next_word = next_word + + @property + def extended_word(self): # pylint:disable=inconsistent-return-statements + """ + Return the extended word for this instance, if any. + """ + if self.next_word: + separator = self.input_string[self.end:self.next_word.start] + next_separator = self.input_string[self.next_word.end:self.next_word.end + 1] + + if (separator == '-' and separator != next_separator) or separator in (' ', '.'): + value = self.input_string[self.start:self.next_word.end].replace('.', ' ') + + return LanguageWord(self.start, self.next_word.end, value, self.input_string, self.next_word.next_word) + + def __repr__(self): + return '<({start},{end}): {value}'.format(start=self.start, end=self.end, value=self.value) + + +def to_rebulk_match(language_match): + """ + Convert language match to rebulk Match: start, end, dict + """ + word = language_match.word + start = word.start + end = word.end + name = language_match.property_name + if language_match.lang == UNDETERMINED: + return start, end, { + 'name': name, + 'value': word.value.lower(), + 'formatter': babelfish.Language, + 'tags': ['weak-language'] + } + + return start, end, { + 'name': name, + 'value': language_match.lang + } + + +class LanguageFinder(object): + """ + Helper class to search and return language matches: 'language' and 'subtitle_language' properties + """ + + def __init__(self, context, + subtitle_prefixes, subtitle_suffixes, + lang_prefixes, lang_suffixes, weak_affixes): + allowed_languages = context.get('allowed_languages') if context else None 
+ self.allowed_languages = {l.lower() for l in allowed_languages or []} + self.weak_affixes = weak_affixes + self.prefixes_map = {} + self.suffixes_map = {} + + if not is_disabled(context, 'subtitle_language'): + self.prefixes_map['subtitle_language'] = subtitle_prefixes + self.suffixes_map['subtitle_language'] = subtitle_suffixes + + self.prefixes_map['language'] = lang_prefixes + self.suffixes_map['language'] = lang_suffixes + + def find(self, string): + """ + Return all matches for language and subtitle_language. + + Undetermined language matches are removed if a regular language is found. + Multi language matches are removed if there are only undetermined language matches + """ + regular_lang_map = defaultdict(set) + undetermined_map = defaultdict(set) + multi_map = defaultdict(set) + + for match in self.iter_language_matches(string): + key = match.property_name + if match.lang == UNDETERMINED: + undetermined_map[key].add(match) + elif match.lang == 'mul': + multi_map[key].add(match) + else: + regular_lang_map[key].add(match) + + for key, values in multi_map.items(): + if key in regular_lang_map or key not in undetermined_map: + for value in values: + yield to_rebulk_match(value) + + for key, values in undetermined_map.items(): + if key not in regular_lang_map: + for value in values: + yield to_rebulk_match(value) + + for values in regular_lang_map.values(): + for value in values: + yield to_rebulk_match(value) + + def iter_language_matches(self, string): + """ + Return language matches for the given string. 
+ """ + candidates = [] + previous = None + for word in iter_words(string): + language_word = LanguageWord(start=word.span[0], end=word.span[1], value=word.value, input_string=string) + if previous: + previous.next_word = language_word + candidates.append(previous) + previous = language_word + if previous: + candidates.append(previous) + + for candidate in candidates: + for match in self.iter_matches_for_candidate(candidate): + yield match + + def iter_matches_for_candidate(self, language_word): + """ + Return language matches for the given candidate word. + """ + tuples = [ + (language_word, language_word.next_word, + self.prefixes_map, + lambda string, prefix: string.startswith(prefix), + lambda string, prefix: string[len(prefix):]), + (language_word.next_word, language_word, + self.suffixes_map, + lambda string, suffix: string.endswith(suffix), + lambda string, suffix: string[:len(string) - len(suffix)]) + ] + + for word, fallback_word, affixes, is_affix, strip_affix in tuples: + if not word: + continue + + match = self.find_match_for_word(word, fallback_word, affixes, is_affix, strip_affix) + if match: + yield match + + match = self.find_language_match_for_word(language_word) + if match: + yield match + + def find_match_for_word(self, word, fallback_word, affixes, is_affix, strip_affix): # pylint:disable=inconsistent-return-statements + """ + Return the language match for the given word and affixes. 
+ """ + for current_word in (word.extended_word, word): + if not current_word: + continue + + word_lang = current_word.value.lower() + + for key, parts in affixes.items(): + for part in parts: + if not is_affix(word_lang, part): + continue + + match = None + value = strip_affix(word_lang, part) + if not value: + if fallback_word and ( + abs(fallback_word.start - word.end) <= 1 or abs(word.start - fallback_word.end) <= 1): + match = self.find_language_match_for_word(fallback_word, key=key) + + if not match and part not in self.weak_affixes: + match = self.create_language_match(key, LanguageWord(current_word.start, current_word.end, + 'und', current_word.input_string)) + else: + match = self.create_language_match(key, LanguageWord(current_word.start, current_word.end, + value, current_word.input_string)) + + if match: + return match + + def find_language_match_for_word(self, word, key='language'): # pylint:disable=inconsistent-return-statements + """ + Return the language match for the given word. + """ + for current_word in (word.extended_word, word): + if current_word: + match = self.create_language_match(key, current_word) + if match: + return match + + def create_language_match(self, key, word): # pylint:disable=inconsistent-return-statements + """ + Create a LanguageMatch for a given word + """ + lang = self.parse_language(word.value.lower()) + + if lang is not None: + return _LanguageMatch(property_name=key, word=word, lang=lang) + + def parse_language(self, lang_word): # pylint:disable=inconsistent-return-statements + """ + Parse the lang_word into a valid Language. + + Multi and Undetermined languages are also valid languages. 
+ """ + try: + lang = babelfish.Language.fromguessit(lang_word) + if ((hasattr(lang, 'name') and lang.name.lower() in self.allowed_languages) or + (hasattr(lang, 'alpha2') and lang.alpha2.lower() in self.allowed_languages) or + lang.alpha3.lower() in self.allowed_languages): + return lang + + except babelfish.Error: + pass + + +class SubtitlePrefixLanguageRule(Rule): + """ + Convert language guess as subtitle_language if previous match is a subtitle language prefix + """ + consequence = RemoveMatch + + properties = {'subtitle_language': [None]} + + def enabled(self, context): + return not is_disabled(context, 'subtitle_language') + + def when(self, matches, context): + to_rename = [] + to_remove = matches.named('subtitle_language.prefix') + for lang in matches.named('language'): + prefix = matches.previous(lang, lambda match: match.name == 'subtitle_language.prefix', 0) + if not prefix: + group_marker = matches.markers.at_match(lang, lambda marker: marker.name == 'group', 0) + if group_marker: + # Find prefix if placed just before the group + prefix = matches.previous(group_marker, lambda match: match.name == 'subtitle_language.prefix', + 0) + if not prefix: + # Find prefix if placed before in the group + prefix = matches.range(group_marker.start, lang.start, + lambda match: match.name == 'subtitle_language.prefix', 0) + if prefix: + to_rename.append((prefix, lang)) + to_remove.extend(matches.conflicting(lang)) + if prefix in to_remove: + to_remove.remove(prefix) + if to_rename or to_remove: + return to_rename, to_remove + return False + + def then(self, matches, when_response, context): + to_rename, to_remove = when_response + super(SubtitlePrefixLanguageRule, self).then(matches, to_remove, context) + for prefix, match in to_rename: + # Remove suffix equivalent of prefix. 
class SubtitleSuffixLanguageRule(Rule):
    """
    Convert a language guess to subtitle_language when the next match is a
    subtitle language suffix.
    """
    dependency = SubtitlePrefixLanguageRule
    consequence = RemoveMatch

    properties = {'subtitle_language': [None]}

    def enabled(self, context):
        return not is_disabled(context, 'subtitle_language')

    def when(self, matches, context):
        to_rename = []
        # Suffix markers are removed unless re-claimed by a language match.
        to_remove = matches.named('subtitle_language.suffix')
        for lang in matches.named('language'):
            following_suffix = matches.next(lang, lambda match: match.name == 'subtitle_language.suffix', 0)
            if not following_suffix:
                continue
            to_rename.append(lang)
            if following_suffix in to_remove:
                to_remove.remove(following_suffix)
        if to_rename or to_remove:
            return to_rename, to_remove
        return False

    def then(self, matches, when_response, context):
        to_rename, to_remove = when_response
        super(SubtitleSuffixLanguageRule, self).then(matches, to_remove, context)
        for match in to_rename:
            # Re-append under the new name so internal indexes are rebuilt.
            matches.remove(match)
            match.name = 'subtitle_language'
            matches.append(match)
+ """ + consequence = [RemoveMatch, RenameMatch('subtitle_language')] + + properties = {'subtitle_language': [None]} + + def enabled(self, context): + return not is_disabled(context, 'subtitle_language') + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + subtitle_extension = matches.named('container', + lambda match: 'extension' in match.tags and 'subtitle' in match.tags, + 0) + if subtitle_extension: + subtitle_lang = matches.previous(subtitle_extension, lambda match: match.name == 'language', 0) + if subtitle_lang: + for weak in matches.named('subtitle_language', predicate=lambda m: 'weak-language' in m.tags): + weak.private = True + + return matches.conflicting(subtitle_lang, lambda m: m.name == 'source'), subtitle_lang + + +class RemoveLanguage(Rule): + """Remove language matches that were not converted to subtitle_language when language is disabled.""" + + consequence = RemoveMatch + + def enabled(self, context): + return is_disabled(context, 'language') + + def when(self, matches, context): + return matches.named('language') + + +class RemoveInvalidLanguages(Rule): + """Remove language matches that matches the blacklisted common words.""" + + consequence = RemoveMatch + priority = 32 + + def __init__(self, common_words): + """Constructor.""" + super(RemoveInvalidLanguages, self).__init__() + self.common_words = common_words + + def when(self, matches, context): + to_remove = [] + for match in matches.range(0, len(matches.input_string), + predicate=lambda m: m.name in ('language', 'subtitle_language')): + if match.raw.lower() not in self.common_words: + continue + + group = matches.markers.at_match(match, index=0, predicate=lambda m: m.name == 'group') + if group and ( + not matches.range( + group.start, group.end, predicate=lambda m: m.name not in ('language', 'subtitle_language') + ) and (not matches.holes(group.start, group.end, predicate=lambda m: m.value.strip(seps)))): + continue + + to_remove.append(match) + + return 
class Mimetype(CustomRule):
    """
    Post processor appending a `mimetype` match guessed from the input
    file name.
    """
    priority = POST_PROCESS

    dependency = Processors

    def when(self, matches, context):
        guessed, _ = mimetypes.guess_type(matches.input_string, strict=False)
        return guessed

    def then(self, matches, when_response, context):
        end = len(matches.input_string)
        # Zero-length match anchored at the very end of the input string.
        matches.append(Match(end, end, name='mimetype', value=when_response))

    @property
    def properties(self):
        """
        Properties for this rule.
        """
        return {'mimetype': [None]}
+ """ + return {'mimetype': [None]} diff --git a/lib/guessit/rules/properties/other.py b/lib/guessit/rules/properties/other.py new file mode 100644 index 00000000..c7dc9a88 --- /dev/null +++ b/lib/guessit/rules/properties/other.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +other property +""" +import copy + +from rebulk import Rebulk, Rule, RemoveMatch, RenameMatch, POST_PROCESS, AppendMatch +from rebulk.remodule import re + +from ..common import dash +from ..common import seps +from ..common.pattern import is_disabled +from ..common.validators import seps_after, seps_before, seps_surround, and_ +from ...reutils import build_or_pattern +from ...rules.common.formatters import raw_cleanup + + +def other(config): # pylint:disable=unused-argument,too-many-statements + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'other')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + rebulk.defaults(name="other", validator=seps_surround) + + rebulk.regex('Audio-?Fix', 'Audio-?Fixed', value='Audio Fixed') + rebulk.regex('Sync-?Fix', 'Sync-?Fixed', value='Sync Fixed') + rebulk.regex('Dual', 'Dual-?Audio', value='Dual Audio') + rebulk.regex('ws', 'wide-?screen', value='Widescreen') + rebulk.regex('Re-?Enc(?:oded)?', value='Reencoded') + + rebulk.string('Repack', 'Rerip', value='Proper', + tags=['streaming_service.prefix', 'streaming_service.suffix']) + rebulk.string('Proper', value='Proper', + tags=['has-neighbor', 'streaming_service.prefix', 'streaming_service.suffix']) + + rebulk.regex('Real-Proper', 'Real-Repack', 'Real-Rerip', value='Proper', + tags=['streaming_service.prefix', 'streaming_service.suffix', 'real']) + rebulk.regex('Real', value='Proper', + tags=['has-neighbor', 'streaming_service.prefix', 
'streaming_service.suffix', 'real']) + + rebulk.string('Fix', 'Fixed', value='Fix', tags=['has-neighbor-before', 'has-neighbor-after', + 'streaming_service.prefix', 'streaming_service.suffix']) + rebulk.string('Dirfix', 'Nfofix', 'Prooffix', value='Fix', + tags=['streaming_service.prefix', 'streaming_service.suffix']) + rebulk.regex('(?:Proof-?)?Sample-?Fix', value='Fix', + tags=['streaming_service.prefix', 'streaming_service.suffix']) + + rebulk.string('Fansub', value='Fan Subtitled', tags='has-neighbor') + rebulk.string('Fastsub', value='Fast Subtitled', tags='has-neighbor') + + season_words = build_or_pattern(["seasons?", "series?"]) + complete_articles = build_or_pattern(["The"]) + + def validate_complete(match): + """ + Make sure season word is are defined. + :param match: + :type match: + :return: + :rtype: + """ + children = match.children + if not children.named('completeWordsBefore') and not children.named('completeWordsAfter'): + return False + return True + + rebulk.regex('(?P<completeArticle>' + complete_articles + '-)?' + + '(?P<completeWordsBefore>' + season_words + '-)?' 
+ + 'Complete' + '(?P<completeWordsAfter>-' + season_words + ')?', + private_names=['completeArticle', 'completeWordsBefore', 'completeWordsAfter'], + value={'other': 'Complete'}, + tags=['release-group-prefix'], + validator={'__parent__': and_(seps_surround, validate_complete)}) + rebulk.string('R5', value='Region 5') + rebulk.string('RC', value='Region C') + rebulk.regex('Pre-?Air', value='Preair') + rebulk.regex('(?:PS-?)Vita', value='PS Vita') + rebulk.regex('Vita', value='PS Vita', tags='has-neighbor') + rebulk.regex('(HD)(?P<another>Rip)', value={'other': 'HD', 'another': 'Rip'}, + private_parent=True, children=True, validator={'__parent__': seps_surround}, validate_all=True) + + for value in ('Screener', 'Remux', 'PAL', 'SECAM', 'NTSC', 'XXX'): + rebulk.string(value, value=value) + rebulk.string('3D', value='3D', tags='has-neighbor') + + rebulk.string('HQ', value='High Quality', tags='uhdbluray-neighbor') + rebulk.string('HR', value='High Resolution') + rebulk.string('LD', value='Line Dubbed') + rebulk.string('MD', value='Mic Dubbed') + rebulk.string('mHD', 'HDLight', value='Micro HD') + rebulk.string('LDTV', value='Low Definition') + rebulk.string('HFR', value='High Frame Rate') + rebulk.string('VFR', value='Variable Frame Rate') + rebulk.string('HD', value='HD', validator=None, + tags=['streaming_service.prefix', 'streaming_service.suffix']) + rebulk.regex('Full-?HD', 'FHD', value='Full HD', validator=None, + tags=['streaming_service.prefix', 'streaming_service.suffix']) + rebulk.regex('Ultra-?(?:HD)?', 'UHD', value='Ultra HD', validator=None, + tags=['streaming_service.prefix', 'streaming_service.suffix']) + rebulk.regex('Upscaled?', value='Upscaled') + + for value in ('Complete', 'Classic', 'Bonus', 'Trailer', 'Retail', + 'Colorized', 'Internal'): + rebulk.string(value, value=value, tags=['has-neighbor', 'release-group-prefix']) + rebulk.regex('LiNE', value='Line Audio', tags=['has-neighbor-before', 'has-neighbor-after', 'release-group-prefix']) + 
rebulk.regex('Read-?NFO', value='Read NFO') + rebulk.string('CONVERT', value='Converted', tags='has-neighbor') + rebulk.string('DOCU', 'DOKU', value='Documentary', tags='has-neighbor') + rebulk.string('OM', value='Open Matte', tags='has-neighbor') + rebulk.string('STV', value='Straight to Video', tags='has-neighbor') + rebulk.string('OAR', value='Original Aspect Ratio', tags='has-neighbor') + rebulk.string('Complet', value='Complete', tags=['has-neighbor', 'release-group-prefix']) + + for coast in ('East', 'West'): + rebulk.regex(r'(?:Live-)?(?:Episode-)?' + coast + '-?(?:Coast-)?Feed', value=coast + ' Coast Feed') + + rebulk.string('VO', 'OV', value='Original Video', tags='has-neighbor') + rebulk.string('Ova', 'Oav', value='Original Animated Video') + + rebulk.regex('Scr(?:eener)?', value='Screener', validator=None, + tags=['other.validate.screener', 'source-prefix', 'source-suffix']) + rebulk.string('Mux', value='Mux', validator=seps_after, + tags=['other.validate.mux', 'video-codec-prefix', 'source-suffix']) + rebulk.string('HC', 'vost', value='Hardcoded Subtitles') + + rebulk.string('SDR', value='Standard Dynamic Range', tags='uhdbluray-neighbor') + rebulk.regex('HDR(?:10)?', value='HDR10', tags='uhdbluray-neighbor') + rebulk.regex('Dolby-?Vision', value='Dolby Vision', tags='uhdbluray-neighbor') + rebulk.regex('BT-?2020', value='BT.2020', tags='uhdbluray-neighbor') + + rebulk.string('Sample', value='Sample', tags=['at-end', 'not-a-release-group']) + rebulk.string('Extras', value='Extras', tags='has-neighbor') + rebulk.regex('Digital-?Extras?', value='Extras') + rebulk.string('Proof', value='Proof', tags=['at-end', 'not-a-release-group']) + rebulk.string('Obfuscated', 'Scrambled', value='Obfuscated', tags=['at-end', 'not-a-release-group']) + rebulk.string('xpost', 'postbot', 'asrequested', value='Repost', tags='not-a-release-group') + + rebulk.rules(RenameAnotherToOther, ValidateHasNeighbor, ValidateHasNeighborAfter, ValidateHasNeighborBefore, + 
class ProperCountRule(Rule):
    """
    Append a `proper_count` match counting the distinct Proper markers.

    A 'real' tagged Proper weighs 2, any other Proper weighs 1; duplicates
    (same cleaned raw text) are counted once.
    """
    priority = POST_PROCESS

    consequence = AppendMatch

    properties = {'proper_count': [None]}

    def when(self, matches, context):  # pylint:disable=inconsistent-return-statements
        propers = matches.named('other', lambda match: match.value == 'Proper')
        if not propers:
            return

        # Deduplicate by cleaned raw value; the dict keeps the last match seen
        # for each distinct raw, mirroring the original accumulation order.
        distinct = {raw_cleanup(proper.raw): proper for proper in propers}

        count_match = copy.copy(propers[-1])
        count_match.name = 'proper_count'
        count_match.value = sum(2 if 'real' in match.tags else 1
                                for match in distinct.values())
        return count_match
matches.input_string[to_check.end:next_match.start].strip(seps): + break + ret.append(to_check) + return ret + + +class ValidateHasNeighborBefore(Rule): + """ + Validate tag has-neighbor-before that previous match exists. + """ + consequence = RemoveMatch + priority = 64 + + def when(self, matches, context): + ret = [] + for to_check in matches.range(predicate=lambda match: 'has-neighbor-before' in match.tags): + next_match = matches.next(to_check, index=0) + next_group = matches.markers.next(to_check, lambda marker: marker.name == 'group', 0) + if next_group and (not next_match or next_group.start < next_match.start): + next_match = next_group + if next_match and not matches.input_string[to_check.end:next_match.start].strip(seps): + break + ret.append(to_check) + return ret + + +class ValidateHasNeighborAfter(Rule): + """ + Validate tag has-neighbor-after that next match exists. + """ + consequence = RemoveMatch + priority = 64 + + def when(self, matches, context): + ret = [] + for to_check in matches.range(predicate=lambda match: 'has-neighbor-after' in match.tags): + previous_match = matches.previous(to_check, index=0) + previous_group = matches.markers.previous(to_check, lambda marker: marker.name == 'group', 0) + if previous_group and (not previous_match or previous_group.end > previous_match.end): + previous_match = previous_group + if previous_match and not matches.input_string[previous_match.end:to_check.start].strip(seps): + break + ret.append(to_check) + return ret + + +class ValidateScreenerRule(Rule): + """ + Validate tag other.validate.screener + """ + consequence = RemoveMatch + priority = 64 + + def when(self, matches, context): + ret = [] + for screener in matches.named('other', lambda match: 'other.validate.screener' in match.tags): + source_match = matches.previous(screener, lambda match: match.initiator.name == 'source', 0) + if not source_match or matches.input_string[source_match.end:screener.start].strip(seps): + ret.append(screener) + return 
ret + + +class ValidateMuxRule(Rule): + """ + Validate tag other.validate.mux + """ + consequence = RemoveMatch + priority = 64 + + def when(self, matches, context): + ret = [] + for mux in matches.named('other', lambda match: 'other.validate.mux' in match.tags): + source_match = matches.previous(mux, lambda match: match.initiator.name == 'source', 0) + if not source_match: + ret.append(mux) + return ret + + +class ValidateHardcodedSubs(Rule): + """Validate HC matches.""" + + priority = 32 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for hc_match in matches.named('other', predicate=lambda match: match.value == 'Hardcoded Subtitles'): + next_match = matches.next(hc_match, predicate=lambda match: match.name == 'subtitle_language', index=0) + if next_match and not matches.holes(hc_match.end, next_match.start, + predicate=lambda match: match.value.strip(seps)): + continue + + previous_match = matches.previous(hc_match, + predicate=lambda match: match.name == 'subtitle_language', index=0) + if previous_match and not matches.holes(previous_match.end, hc_match.start, + predicate=lambda match: match.value.strip(seps)): + continue + + to_remove.append(hc_match) + + return to_remove + + +class ValidateStreamingServiceNeighbor(Rule): + """Validate streaming service's neighbors.""" + + priority = 32 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for match in matches.named('other', + predicate=lambda m: (m.initiator.name != 'source' + and ('streaming_service.prefix' in m.tags + or 'streaming_service.suffix' in m.tags))): + match = match.initiator + if not seps_after(match): + if 'streaming_service.prefix' in match.tags: + next_match = matches.next(match, lambda m: m.name == 'streaming_service', 0) + if next_match and not matches.holes(match.end, next_match.start, + predicate=lambda m: m.value.strip(seps)): + continue + if match.children: + to_remove.extend(match.children) + to_remove.append(match) + + 
elif not seps_before(match): + if 'streaming_service.suffix' in match.tags: + previous_match = matches.previous(match, lambda m: m.name == 'streaming_service', 0) + if previous_match and not matches.holes(previous_match.end, match.start, + predicate=lambda m: m.value.strip(seps)): + continue + + if match.children: + to_remove.extend(match.children) + to_remove.append(match) + + return to_remove + + +class ValidateAtEnd(Rule): + """Validate other which should occur at the end of a filepart.""" + + priority = 32 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + for match in matches.range(filepart.start, filepart.end, + predicate=lambda m: m.name == 'other' and 'at-end' in m.tags): + if (matches.holes(match.end, filepart.end, predicate=lambda m: m.value.strip(seps)) or + matches.range(match.end, filepart.end, predicate=lambda m: m.name not in ( + 'other', 'container'))): + to_remove.append(match) + + return to_remove + + +class ValidateReal(Rule): + """ + Validate Real + """ + consequence = RemoveMatch + priority = 64 + + def when(self, matches, context): + ret = [] + for filepart in matches.markers.named('path'): + for match in matches.range(filepart.start, filepart.end, lambda m: m.name == 'other' and 'real' in m.tags): + if not matches.range(filepart.start, match.start): + ret.append(match) + + return ret diff --git a/lib/guessit/rules/properties/part.py b/lib/guessit/rules/properties/part.py new file mode 100644 index 00000000..c1123394 --- /dev/null +++ b/lib/guessit/rules/properties/part.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +part property +""" +from rebulk.remodule import re + +from rebulk import Rebulk +from ..common import dash +from ..common.pattern import is_disabled +from ..common.validators import seps_surround, int_coercable, and_ +from ..common.numeral import numeral, parse_numeral +from ...reutils import build_or_pattern + + +def 
part(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'part')) + rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash], validator={'__parent__': seps_surround}) + + prefixes = config['prefixes'] + + def validate_roman(match): + """ + Validate a roman match if surrounded by separators + :param match: + :type match: + :return: + :rtype: + """ + if int_coercable(match.raw): + return True + return seps_surround(match) + + rebulk.regex(build_or_pattern(prefixes) + r'-?(?P<part>' + numeral + r')', + prefixes=prefixes, validate_all=True, private_parent=True, children=True, formatter=parse_numeral, + validator={'part': and_(validate_roman, lambda m: 0 < m.value < 100)}) + + return rebulk diff --git a/lib/guessit/rules/properties/release_group.py b/lib/guessit/rules/properties/release_group.py new file mode 100644 index 00000000..ecff808b --- /dev/null +++ b/lib/guessit/rules/properties/release_group.py @@ -0,0 +1,347 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +release_group property +""" +import copy + +from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch +from rebulk.match import Match + +from ..common import seps +from ..common.comparators import marker_sorted +from ..common.expected import build_expected_function +from ..common.formatters import cleanup +from ..common.pattern import is_disabled +from ..common.validators import int_coercable, seps_surround +from ..properties.title import TitleFromPosition + + +def release_group(config): + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + forbidden_groupnames = config['forbidden_names'] + + groupname_ignore_seps = config['ignored_seps'] + groupname_seps = ''.join([c for c in seps if c not in groupname_ignore_seps]) + + def clean_groupname(string): + """ + Removes and strip separators from input_string + :param string: + :type string: + :return: + :rtype: + """ + string = string.strip(groupname_seps) + if not (string.endswith(tuple(groupname_ignore_seps)) and string.startswith(tuple(groupname_ignore_seps))) \ + and not any(i in string.strip(groupname_ignore_seps) for i in groupname_ignore_seps): + string = string.strip(groupname_ignore_seps) + for forbidden in forbidden_groupnames: + if string.lower().startswith(forbidden) and string[len(forbidden):len(forbidden) + 1] in seps: + string = string[len(forbidden):] + string = string.strip(groupname_seps) + if string.lower().endswith(forbidden) and string[-len(forbidden) - 1:-len(forbidden)] in seps: + string = string[:len(forbidden)] + string = string.strip(groupname_seps) + return string.strip() + + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'release_group')) + + expected_group = build_expected_function('expected_group') + + rebulk.functional(expected_group, name='release_group', tags=['expected'], + validator=seps_surround, + conflict_solver=lambda match, other: other, + disabled=lambda context: not context.get('expected_group')) + + return rebulk.rules( + DashSeparatedReleaseGroup(clean_groupname), + SceneReleaseGroup(clean_groupname), + AnimeReleaseGroup + ) + + +_scene_previous_names = ('video_codec', 'source', 'video_api', 'audio_codec', 'audio_profile', 'video_profile', + 'audio_channels', 'screen_size', 'other', 'container', 'language', 'subtitle_language', + 'subtitle_language.suffix', 'subtitle_language.prefix', 'language.suffix') + +_scene_previous_tags = ('release-group-prefix',) + +_scene_no_previous_tags = 
('no-release-group-prefix',) + + +class DashSeparatedReleaseGroup(Rule): + """ + Detect dash separated release groups that might appear at the end or at the beginning of a release name. + + Series.S01E02.Pilot.DVDRip.x264-CS.mkv + release_group: CS + abc-the.title.name.1983.1080p.bluray.x264.mkv + release_group: abc + + At the end: Release groups should be dash-separated and shouldn't contain spaces nor + appear in a group with other matches. The preceding matches should be separated by dot. + If a release group is found, the conflicting matches are removed. + + At the beginning: Release groups should be dash-separated and shouldn't contain spaces nor appear in a group. + It should be followed by a hole with dot-separated words. + Detection only happens if no matches exist at the beginning. + """ + consequence = [RemoveMatch, AppendMatch] + + def __init__(self, value_formatter): + """Default constructor.""" + super(DashSeparatedReleaseGroup, self).__init__() + self.value_formatter = value_formatter + + @classmethod + def is_valid(cls, matches, candidate, start, end, at_end): # pylint:disable=inconsistent-return-statements + """ + Whether a candidate is a valid release group. + """ + if not at_end: + if len(candidate.value) <= 1: + return False + + if matches.markers.at_match(candidate, predicate=lambda m: m.name == 'group'): + return False + + first_hole = matches.holes(candidate.end, end, predicate=lambda m: m.start == candidate.end, index=0) + if not first_hole: + return False + + raw_value = first_hole.raw + return raw_value[0] == '-' and '-' not in raw_value[1:] and '.' 
in raw_value and ' ' not in raw_value + + group = matches.markers.at_match(candidate, predicate=lambda m: m.name == 'group', index=0) + if group and matches.at_match(group, predicate=lambda m: not m.private and m.span != candidate.span): + return False + + count = 0 + match = candidate + while match: + current = matches.range(start, + match.start, + index=-1, + predicate=lambda m: not m.private and not 'expected' in m.tags) + if not current: + break + + separator = match.input_string[current.end:match.start] + if not separator and match.raw[0] == '-': + separator = '-' + + match = current + + if count == 0: + if separator != '-': + break + + count += 1 + continue + + if separator == '.': + return True + + def detect(self, matches, start, end, at_end): # pylint:disable=inconsistent-return-statements + """ + Detect release group at the end or at the beginning of a filepart. + """ + candidate = None + if at_end: + container = matches.ending(end, lambda m: m.name == 'container', index=0) + if container: + end = container.start + + candidate = matches.ending(end, index=0, predicate=( + lambda m: not m.private and not ( + m.name == 'other' and 'not-a-release-group' in m.tags + ) and '-' not in m.raw and m.raw.strip() == m.raw)) + + if not candidate: + if at_end: + candidate = matches.holes(start, end, seps=seps, index=-1, + predicate=lambda m: m.end == end and m.raw.strip(seps) and m.raw[0] == '-') + else: + candidate = matches.holes(start, end, seps=seps, index=0, + predicate=lambda m: m.start == start and m.raw.strip(seps)) + + if candidate and self.is_valid(matches, candidate, start, end, at_end): + return candidate + + def when(self, matches, context): # pylint:disable=inconsistent-return-statements + if matches.named('release_group'): + return + + to_remove = [] + to_append = [] + for filepart in matches.markers.named('path'): + candidate = self.detect(matches, filepart.start, filepart.end, True) + if candidate: + to_remove.extend(matches.at_match(candidate)) + 
else: + candidate = self.detect(matches, filepart.start, filepart.end, False) + + if candidate: + releasegroup = Match(candidate.start, candidate.end, name='release_group', + formatter=self.value_formatter, input_string=candidate.input_string) + + if releasegroup.value: + to_append.append(releasegroup) + if to_remove or to_append: + return to_remove, to_append + + +class SceneReleaseGroup(Rule): + """ + Add release_group match in existing matches (scene format). + + Something.XViD-ReleaseGroup.mkv + """ + dependency = [TitleFromPosition] + consequence = AppendMatch + + properties = {'release_group': [None]} + + def __init__(self, value_formatter): + """Default constructor.""" + super(SceneReleaseGroup, self).__init__() + self.value_formatter = value_formatter + + @staticmethod + def is_previous_match(match): + """ + Check if match can precede release_group + + :param match: + :return: + """ + return not match.tagged(*_scene_no_previous_tags) if match.name in _scene_previous_names else \ + match.tagged(*_scene_previous_tags) + + def when(self, matches, context): # pylint:disable=too-many-locals + # If a release_group is found before, ignore this kind of release_group rule. + + ret = [] + + for filepart in marker_sorted(matches.markers.named('path'), matches): + # pylint:disable=cell-var-from-loop + start, end = filepart.span + if matches.named('release_group', predicate=lambda m: m.start >= start and m.end <= end): + continue + + titles = matches.named('title', predicate=lambda m: m.start >= start and m.end <= end) + + def keep_only_first_title(match): + """ + Keep only first title from this filepart, as other ones are most likely release group. 
+ + :param match: + :type match: + :return: + :rtype: + """ + return match in titles[1:] + + last_hole = matches.holes(start, end + 1, formatter=self.value_formatter, + ignore=keep_only_first_title, + predicate=lambda hole: cleanup(hole.value), index=-1) + + if last_hole: + def previous_match_filter(match): + """ + Filter to apply to find previous match + + :param match: + :type match: + :return: + :rtype: + """ + + if match.start < filepart.start: + return False + return not match.private or self.is_previous_match(match) + + previous_match = matches.previous(last_hole, + previous_match_filter, + index=0) + if previous_match and (self.is_previous_match(previous_match)) and \ + not matches.input_string[previous_match.end:last_hole.start].strip(seps) \ + and not int_coercable(last_hole.value.strip(seps)): + + last_hole.name = 'release_group' + last_hole.tags = ['scene'] + + # if hole is inside a group marker with same value, remove [](){} ... + group = matches.markers.at_match(last_hole, lambda marker: marker.name == 'group', 0) + if group: + group.formatter = self.value_formatter + if group.value == last_hole.value: + last_hole.start = group.start + 1 + last_hole.end = group.end - 1 + last_hole.tags = ['anime'] + + ignored_matches = matches.range(last_hole.start, last_hole.end, keep_only_first_title) + + for ignored_match in ignored_matches: + matches.remove(ignored_match) + + ret.append(last_hole) + return ret + + +class AnimeReleaseGroup(Rule): + """ + Add release_group match in existing matches (anime format) + ...[ReleaseGroup] Something.mkv + """ + dependency = [SceneReleaseGroup, TitleFromPosition] + consequence = [RemoveMatch, AppendMatch] + + properties = {'release_group': [None]} + + def when(self, matches, context): + to_remove = [] + to_append = [] + + # If a release_group is found before, ignore this kind of release_group rule. 
+ if matches.named('release_group'): + return False + + if not matches.named('episode') and not matches.named('season') and matches.named('release_group'): + # This doesn't seems to be an anime, and we already found another release_group. + return False + + for filepart in marker_sorted(matches.markers.named('path'), matches): + + # pylint:disable=bad-continuation + empty_group = matches.markers.range(filepart.start, + filepart.end, + lambda marker: (marker.name == 'group' + and not matches.range(marker.start, marker.end, + lambda m: + 'weak-language' not in m.tags) + and marker.value.strip(seps) + and not int_coercable(marker.value.strip(seps))), 0) + + if empty_group: + group = copy.copy(empty_group) + group.marker = False + group.raw_start += 1 + group.raw_end -= 1 + group.tags = ['anime'] + group.name = 'release_group' + to_append.append(group) + to_remove.extend(matches.range(empty_group.start, empty_group.end, + lambda m: 'weak-language' in m.tags)) + + if to_remove or to_append: + return to_remove, to_append + return False diff --git a/lib/guessit/rules/properties/screen_size.py b/lib/guessit/rules/properties/screen_size.py new file mode 100644 index 00000000..77d5d052 --- /dev/null +++ b/lib/guessit/rules/properties/screen_size.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +screen_size property +""" +from rebulk.match import Match +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch, AppendMatch + +from ..common.pattern import is_disabled +from ..common.quantity import FrameRate +from ..common.validators import seps_surround +from ..common import dash, seps +from ...reutils import build_or_pattern + + +def screen_size(config): + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + interlaced = frozenset(config['interlaced']) + progressive = frozenset(config['progressive']) + frame_rates = [re.escape(rate) for rate in config['frame_rates']] + min_ar = config['min_ar'] + max_ar = config['max_ar'] + + rebulk = Rebulk() + rebulk = rebulk.string_defaults(ignore_case=True).regex_defaults(flags=re.IGNORECASE) + + rebulk.defaults(name='screen_size', validator=seps_surround, abbreviations=[dash], + disabled=lambda context: is_disabled(context, 'screen_size')) + + frame_rate_pattern = build_or_pattern(frame_rates, name='frame_rate') + interlaced_pattern = build_or_pattern(interlaced, name='height') + progressive_pattern = build_or_pattern(progressive, name='height') + + res_pattern = r'(?:(?P<width>\d{3,4})(?:x|\*))?' + rebulk.regex(res_pattern + interlaced_pattern + r'(?P<scan_type>i)' + frame_rate_pattern + '?') + rebulk.regex(res_pattern + progressive_pattern + r'(?P<scan_type>p)' + frame_rate_pattern + '?') + rebulk.regex(res_pattern + progressive_pattern + r'(?P<scan_type>p)?(?:hd)') + rebulk.regex(res_pattern + progressive_pattern + r'(?P<scan_type>p)?x?') + rebulk.string('4k', value='2160p') + rebulk.regex(r'(?P<width>\d{3,4})-?(?:x|\*)-?(?P<height>\d{3,4})', + conflict_solver=lambda match, other: '__default__' if other.name == 'screen_size' else other) + + rebulk.regex(frame_rate_pattern + '(p|fps)', name='frame_rate', + formatter=FrameRate.fromstring, disabled=lambda context: is_disabled(context, 'frame_rate')) + + rebulk.rules(PostProcessScreenSize(progressive, min_ar, max_ar), ScreenSizeOnlyOne, ResolveScreenSizeConflicts) + + return rebulk + + +class PostProcessScreenSize(Rule): + """ + Process the screen size calculating the aspect ratio if available. + + Convert to a standard notation (720p, 1080p, etc) when it's a standard resolution and + aspect ratio is valid or not available. 
+ + It also creates an aspect_ratio match when available. + """ + consequence = AppendMatch + + def __init__(self, standard_heights, min_ar, max_ar): + super(PostProcessScreenSize, self).__init__() + self.standard_heights = standard_heights + self.min_ar = min_ar + self.max_ar = max_ar + + def when(self, matches, context): + to_append = [] + for match in matches.named('screen_size'): + if not is_disabled(context, 'frame_rate'): + for frame_rate in match.children.named('frame_rate'): + frame_rate.formatter = FrameRate.fromstring + to_append.append(frame_rate) + + values = match.children.to_dict() + if 'height' not in values: + continue + + scan_type = (values.get('scan_type') or 'p').lower() + height = values['height'] + if 'width' not in values: + match.value = '{0}{1}'.format(height, scan_type) + continue + + width = values['width'] + calculated_ar = float(width) / float(height) + + aspect_ratio = Match(match.start, match.end, input_string=match.input_string, + name='aspect_ratio', value=round(calculated_ar, 3)) + + if not is_disabled(context, 'aspect_ratio'): + to_append.append(aspect_ratio) + + if height in self.standard_heights and self.min_ar < calculated_ar < self.max_ar: + match.value = '{0}{1}'.format(height, scan_type) + else: + match.value = '{0}x{1}'.format(width, height) + + return to_append + + +class ScreenSizeOnlyOne(Rule): + """ + Keep a single screen_size per filepath part. + """ + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + screensize = list(reversed(matches.range(filepart.start, filepart.end, + lambda match: match.name == 'screen_size'))) + if len(screensize) > 1 and len(set((match.value for match in screensize))) > 1: + to_remove.extend(screensize[1:]) + + return to_remove + + +class ResolveScreenSizeConflicts(Rule): + """ + Resolve screen_size conflicts with season and episode matches. 
+ """ + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for filepart in matches.markers.named('path'): + screensize = matches.range(filepart.start, filepart.end, lambda match: match.name == 'screen_size', 0) + if not screensize: + continue + + conflicts = matches.conflicting(screensize, lambda match: match.name in ('season', 'episode')) + if not conflicts: + continue + + has_neighbor = False + video_profile = matches.range(screensize.end, filepart.end, lambda match: match.name == 'video_profile', 0) + if video_profile and not matches.holes(screensize.end, video_profile.start, + predicate=lambda h: h.value and h.value.strip(seps)): + to_remove.extend(conflicts) + has_neighbor = True + + previous = matches.previous(screensize, index=0, predicate=( + lambda m: m.name in ('date', 'source', 'other', 'streaming_service'))) + if previous and not matches.holes(previous.end, screensize.start, + predicate=lambda h: h.value and h.value.strip(seps)): + to_remove.extend(conflicts) + has_neighbor = True + + if not has_neighbor: + to_remove.append(screensize) + + return to_remove diff --git a/lib/guessit/rules/properties/size.py b/lib/guessit/rules/properties/size.py new file mode 100644 index 00000000..c61580c0 --- /dev/null +++ b/lib/guessit/rules/properties/size.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +size property +""" +import re + +from rebulk import Rebulk + +from ..common import dash +from ..common.quantity import Size +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def size(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'size')) + rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) + rebulk.defaults(name='size', validator=seps_surround) + rebulk.regex(r'\d+-?[mgt]b', r'\d+\.\d+-?[mgt]b', formatter=Size.fromstring, tags=['release-group-prefix']) + + return rebulk diff --git a/lib/guessit/rules/properties/source.py b/lib/guessit/rules/properties/source.py new file mode 100644 index 00000000..2fe55618 --- /dev/null +++ b/lib/guessit/rules/properties/source.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +source property +""" +import copy + +from rebulk.remodule import re + +from rebulk import AppendMatch, Rebulk, RemoveMatch, Rule + +from .audio_codec import HqConflictRule +from ..common import dash, seps +from ..common.pattern import is_disabled +from ..common.validators import seps_before, seps_after, or_ + + +def source(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'source')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash], private_parent=True, children=True) + rebulk = rebulk.defaults(name='source', + tags=['video-codec-prefix', 'streaming_service.suffix'], + validate_all=True, + validator={'__parent__': or_(seps_before, seps_after)}) + + rip_prefix = '(?P<other>Rip)-?' + rip_suffix = '-?(?P<other>Rip)' + rip_optional_suffix = '(?:' + rip_suffix + ')?' 
+ + def build_source_pattern(*patterns, **kwargs): + """Helper pattern to build source pattern.""" + prefix_format = kwargs.get('prefix') or '' + suffix_format = kwargs.get('suffix') or '' + + string_format = prefix_format + '({0})' + suffix_format + return [string_format.format(pattern) for pattern in patterns] + + def demote_other(match, other): # pylint: disable=unused-argument + """Default conflict solver with 'other' property.""" + return other if other.name == 'other' or other.name == 'release_group' else '__default__' + + rebulk.regex(*build_source_pattern('VHS', suffix=rip_optional_suffix), + value={'source': 'VHS', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('CAM', suffix=rip_optional_suffix), + value={'source': 'Camera', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('HD-?CAM', suffix=rip_optional_suffix), + value={'source': 'HD Camera', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('TELESYNC', 'TS', suffix=rip_optional_suffix), + value={'source': 'Telesync', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('HD-?TELESYNC', 'HD-?TS', suffix=rip_optional_suffix), + value={'source': 'HD Telesync', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('WORKPRINT', 'WP'), value='Workprint') + rebulk.regex(*build_source_pattern('TELECINE', 'TC', suffix=rip_optional_suffix), + value={'source': 'Telecine', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('HD-?TELECINE', 'HD-?TC', suffix=rip_optional_suffix), + value={'source': 'HD Telecine', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('PPV', suffix=rip_optional_suffix), + value={'source': 'Pay-per-view', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('SD-?TV', suffix=rip_optional_suffix), + value={'source': 'TV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('TV', suffix=rip_suffix), # TV is too common to allow matching + value={'source': 'TV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('TV', 'SD-?TV', prefix=rip_prefix), + value={'source': 
'TV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('TV-?(?=Dub)'), value='TV') + rebulk.regex(*build_source_pattern('DVB', 'PD-?TV', suffix=rip_optional_suffix), + value={'source': 'Digital TV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('DVD', suffix=rip_optional_suffix), + value={'source': 'DVD', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('DM', suffix=rip_optional_suffix), + value={'source': 'Digital Master', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('VIDEO-?TS', 'DVD-?R(?:$|(?!E))', # 'DVD-?R(?:$|^E)' => DVD-Real ... + 'DVD-?9', 'DVD-?5'), value='DVD') + + rebulk.regex(*build_source_pattern('HD-?TV', suffix=rip_optional_suffix), conflict_solver=demote_other, + value={'source': 'HDTV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('TV-?HD', suffix=rip_suffix), conflict_solver=demote_other, + value={'source': 'HDTV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('TV', suffix='-?(?P<other>Rip-?HD)'), conflict_solver=demote_other, + value={'source': 'HDTV', 'other': 'Rip'}) + + rebulk.regex(*build_source_pattern('VOD', suffix=rip_optional_suffix), + value={'source': 'Video on Demand', 'other': 'Rip'}) + + rebulk.regex(*build_source_pattern('WEB', 'WEB-?DL', suffix=rip_suffix), + value={'source': 'Web', 'other': 'Rip'}) + # WEBCap is a synonym to WEBRip, mostly used by non english + rebulk.regex(*build_source_pattern('WEB-?(?P<another>Cap)', suffix=rip_optional_suffix), + value={'source': 'Web', 'other': 'Rip', 'another': 'Rip'}) + rebulk.regex(*build_source_pattern('WEB-?DL', 'WEB-?U?HD', 'DL-?WEB', 'DL(?=-?Mux)'), + value={'source': 'Web'}) + rebulk.regex('(WEB)', value='Web', tags='weak.source') + + rebulk.regex(*build_source_pattern('HD-?DVD', suffix=rip_optional_suffix), + value={'source': 'HD-DVD', 'other': 'Rip'}) + + rebulk.regex(*build_source_pattern('Blu-?ray', 'BD', 'BD[59]', 'BD25', 'BD50', suffix=rip_optional_suffix), + value={'source': 'Blu-ray', 'other': 'Rip'}) + 
rebulk.regex(*build_source_pattern('(?P<another>BR)-?(?=Scr(?:eener)?)', '(?P<another>BR)-?(?=Mux)'), # BRRip + value={'source': 'Blu-ray', 'another': 'Reencoded'}) + rebulk.regex(*build_source_pattern('(?P<another>BR)', suffix=rip_suffix), # BRRip + value={'source': 'Blu-ray', 'other': 'Rip', 'another': 'Reencoded'}) + + rebulk.regex(*build_source_pattern('Ultra-?Blu-?ray', 'Blu-?ray-?Ultra'), value='Ultra HD Blu-ray') + + rebulk.regex(*build_source_pattern('AHDTV'), value='Analog HDTV') + rebulk.regex(*build_source_pattern('UHD-?TV', suffix=rip_optional_suffix), conflict_solver=demote_other, + value={'source': 'Ultra HDTV', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('UHD', suffix=rip_suffix), conflict_solver=demote_other, + value={'source': 'Ultra HDTV', 'other': 'Rip'}) + + rebulk.regex(*build_source_pattern('DSR', 'DTH', suffix=rip_optional_suffix), + value={'source': 'Satellite', 'other': 'Rip'}) + rebulk.regex(*build_source_pattern('DSR?', 'SAT', suffix=rip_suffix), + value={'source': 'Satellite', 'other': 'Rip'}) + + rebulk.rules(ValidateSourcePrefixSuffix, ValidateWeakSource, UltraHdBlurayRule) + + return rebulk + + +class UltraHdBlurayRule(Rule): + """ + Replace other:Ultra HD and source:Blu-ray with source:Ultra HD Blu-ray + """ + dependency = HqConflictRule + consequence = [RemoveMatch, AppendMatch] + + @classmethod + def find_ultrahd(cls, matches, start, end, index): + """Find Ultra HD match.""" + return matches.range(start, end, index=index, predicate=( + lambda m: not m.private and m.name == 'other' and m.value == 'Ultra HD' + )) + + @classmethod + def validate_range(cls, matches, start, end): + """Validate no holes or invalid matches exist in the specified range.""" + return ( + not matches.holes(start, end, predicate=lambda m: m.value.strip(seps)) and + not matches.range(start, end, predicate=( + lambda m: not m.private and ( + m.name not in ('screen_size', 'color_depth') and ( + m.name != 'other' or 'uhdbluray-neighbor' not in m.tags)))) 
+ ) + + def when(self, matches, context): + to_remove = [] + to_append = [] + for filepart in matches.markers.named('path'): + for match in matches.range(filepart.start, filepart.end, predicate=( + lambda m: not m.private and m.name == 'source' and m.value == 'Blu-ray')): + other = self.find_ultrahd(matches, filepart.start, match.start, -1) + if not other or not self.validate_range(matches, other.end, match.start): + other = self.find_ultrahd(matches, match.end, filepart.end, 0) + if not other or not self.validate_range(matches, match.end, other.start): + if not matches.range(filepart.start, filepart.end, predicate=( + lambda m: m.name == 'screen_size' and m.value == '2160p')): + continue + + if other: + other.private = True + + new_source = copy.copy(match) + new_source.value = 'Ultra HD Blu-ray' + to_remove.append(match) + to_append.append(new_source) + + if to_remove or to_append: + return to_remove, to_append + return False + + +class ValidateSourcePrefixSuffix(Rule): + """ + Validate source with source prefix, source suffix. 
+ """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for filepart in matches.markers.named('path'): + for match in matches.range(filepart.start, filepart.end, predicate=lambda m: m.name == 'source'): + match = match.initiator + if not seps_before(match) and \ + not matches.range(match.start - 1, match.start - 2, + lambda m: 'source-prefix' in m.tags): + if match.children: + ret.extend(match.children) + ret.append(match) + continue + if not seps_after(match) and \ + not matches.range(match.end, match.end + 1, + lambda m: 'source-suffix' in m.tags): + if match.children: + ret.extend(match.children) + ret.append(match) + continue + + return ret + + +class ValidateWeakSource(Rule): + """ + Validate weak source + """ + dependency = [ValidateSourcePrefixSuffix] + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + ret = [] + for filepart in matches.markers.named('path'): + for match in matches.range(filepart.start, filepart.end, predicate=lambda m: m.name == 'source'): + # if there are more than 1 source in this filepart, just before the year and with holes for the title + # most likely the source is part of the title + if 'weak.source' in match.tags \ + and matches.range(match.end, filepart.end, predicate=lambda m: m.name == 'source') \ + and matches.holes(filepart.start, match.start, + predicate=lambda m: m.value.strip(seps), index=-1): + if match.children: + ret.extend(match.children) + ret.append(match) + continue + + return ret diff --git a/lib/guessit/rules/properties/streaming_service.py b/lib/guessit/rules/properties/streaming_service.py new file mode 100644 index 00000000..f467f20a --- /dev/null +++ b/lib/guessit/rules/properties/streaming_service.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +streaming_service property +""" +import re + +from rebulk import Rebulk +from rebulk.rules import Rule, RemoveMatch + +from ..common.pattern import is_disabled +from 
...rules.common import seps, dash +from ...rules.common.validators import seps_before, seps_after + + +def streaming_service(config): # pylint: disable=too-many-statements,unused-argument + """Streaming service property. + + :param config: rule configuration + :type config: dict + :return: + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'streaming_service')) + rebulk = rebulk.string_defaults(ignore_case=True).regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]) + rebulk.defaults(name='streaming_service', tags=['source-prefix']) + + for value, items in config.items(): + patterns = items if isinstance(items, list) else [items] + for pattern in patterns: + if pattern.startswith('re:'): + rebulk.regex(pattern, value=value) + else: + rebulk.string(pattern, value=value) + + rebulk.rules(ValidateStreamingService) + + return rebulk + + +class ValidateStreamingService(Rule): + """Validate streaming service matches.""" + + priority = 128 + consequence = RemoveMatch + + def when(self, matches, context): + """Streaming service is always before source. 
+ + :param matches: + :type matches: rebulk.match.Matches + :param context: + :type context: dict + :return: + """ + to_remove = [] + for service in matches.named('streaming_service'): + next_match = matches.next(service, lambda match: 'streaming_service.suffix' in match.tags, 0) + previous_match = matches.previous(service, lambda match: 'streaming_service.prefix' in match.tags, 0) + has_other = service.initiator and service.initiator.children.named('other') + + if not has_other: + if (not next_match or + matches.holes(service.end, next_match.start, + predicate=lambda match: match.value.strip(seps)) or + not seps_before(service)): + if (not previous_match or + matches.holes(previous_match.end, service.start, + predicate=lambda match: match.value.strip(seps)) or + not seps_after(service)): + to_remove.append(service) + continue + + if service.value == 'Comedy Central': + # Current match is a valid streaming service, removing invalid Criterion Collection (CC) matches + to_remove.extend(matches.named('edition', predicate=lambda match: match.value == 'Criterion')) + + return to_remove diff --git a/lib/guessit/rules/properties/title.py b/lib/guessit/rules/properties/title.py new file mode 100644 index 00000000..0d263016 --- /dev/null +++ b/lib/guessit/rules/properties/title.py @@ -0,0 +1,349 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +title property +""" + +from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch, AppendTags +from rebulk.formatters import formatters + +from .film import FilmTitleRule +from .language import ( + SubtitlePrefixLanguageRule, + SubtitleSuffixLanguageRule, + SubtitleExtensionRule, + NON_SPECIFIC_LANGUAGES +) +from ..common import seps, title_seps +from ..common.comparators import marker_sorted +from ..common.expected import build_expected_function +from ..common.formatters import cleanup, reorder_title +from ..common.pattern import is_disabled +from ..common.validators import seps_surround + + +def title(config): # 
pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'title')) + rebulk.rules(TitleFromPosition, PreferTitleWithYear) + + expected_title = build_expected_function('expected_title') + + rebulk.functional(expected_title, name='title', tags=['expected', 'title'], + validator=seps_surround, + formatter=formatters(cleanup, reorder_title), + conflict_solver=lambda match, other: other, + disabled=lambda context: not context.get('expected_title')) + + return rebulk + + +class TitleBaseRule(Rule): + """ + Add title match in existing matches + """ + # pylint:disable=no-self-use,unused-argument + consequence = [AppendMatch, RemoveMatch] + + def __init__(self, match_name, match_tags=None, alternative_match_name=None): + super(TitleBaseRule, self).__init__() + self.match_name = match_name + self.match_tags = match_tags + self.alternative_match_name = alternative_match_name + + def hole_filter(self, hole, matches): + """ + Filter holes for titles. + :param hole: + :type hole: + :param matches: + :type matches: + :return: + :rtype: + """ + return True + + def filepart_filter(self, filepart, matches): + """ + Filter filepart for titles. 
+ :param filepart: + :type filepart: + :param matches: + :type matches: + :return: + :rtype: + """ + return True + + def holes_process(self, holes, matches): + """ + process holes + :param holes: + :type holes: + :param matches: + :type matches: + :return: + :rtype: + """ + cropped_holes = [] + group_markers = matches.markers.named('group') + for group_marker in group_markers: + path_marker = matches.markers.at_match(group_marker, predicate=lambda m: m.name == 'path', index=0) + if path_marker and path_marker.span == group_marker.span: + group_markers.remove(group_marker) + + for hole in holes: + cropped_holes.extend(hole.crop(group_markers)) + + return cropped_holes + + @staticmethod + def is_ignored(match): + """ + Ignore matches when scanning for title (hole). + + Full word language and countries won't be ignored if they are uppercase. + """ + return not (len(match) > 3 and match.raw.isupper()) and match.name in ('language', 'country', 'episode_details') + + def should_keep(self, match, to_keep, matches, filepart, hole, starting): + """ + Check if this match should be accepted when ending or starting a hole. + :param match: + :type match: + :param to_keep: + :type to_keep: list[Match] + :param matches: + :type matches: Matches + :param hole: the filepart match + :type hole: Match + :param hole: the hole match + :type hole: Match + :param starting: true if match is starting the hole + :type starting: bool + :return: + :rtype: + """ + if match.name in ('language', 'country'): + # Keep language if exactly matching the hole. + if len(hole.value) == len(match.raw): + return True + + # Keep language if other languages exists in the filepart. 
+ outside_matches = filepart.crop(hole) + other_languages = [] + for outside in outside_matches: + other_languages.extend(matches.range(outside.start, outside.end, + lambda c_match: c_match.name == match.name and + c_match not in to_keep and + c_match.value not in NON_SPECIFIC_LANGUAGES)) + + if not other_languages and (not starting or len(match.raw) <= 3): + return True + + return False + + def should_remove(self, match, matches, filepart, hole, context): + """ + Check if this match should be removed after beeing ignored. + :param match: + :param matches: + :param filepart: + :param hole: + :return: + """ + if context.get('type') == 'episode' and match.name == 'episode_details': + return match.start >= hole.start and match.end <= hole.end + return True + + def check_titles_in_filepart(self, filepart, matches, context): # pylint:disable=inconsistent-return-statements + """ + Find title in filepart (ignoring language) + """ + # pylint:disable=too-many-locals,too-many-branches,too-many-statements + start, end = filepart.span + + holes = matches.holes(start, end + 1, formatter=formatters(cleanup, reorder_title), + ignore=self.is_ignored, + predicate=lambda m: m.value) + + holes = self.holes_process(holes, matches) + + for hole in holes: + if not hole or (self.hole_filter and not self.hole_filter(hole, matches)): + continue + + to_remove = [] + to_keep = [] + + ignored_matches = matches.range(hole.start, hole.end, self.is_ignored) + + if ignored_matches: + for ignored_match in reversed(ignored_matches): + # pylint:disable=undefined-loop-variable, cell-var-from-loop + trailing = matches.chain_before(hole.end, seps, predicate=lambda m: m == ignored_match) + if trailing: + should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, False) + if should_keep: + # pylint:disable=unpacking-non-sequence + try: + append, crop = should_keep + except TypeError: + append, crop = should_keep, should_keep + if append: + to_keep.append(ignored_match) + if crop: + 
hole.end = ignored_match.start + + for ignored_match in ignored_matches: + if ignored_match not in to_keep: + starting = matches.chain_after(hole.start, seps, + predicate=lambda m: m == ignored_match) + if starting: + should_keep = self.should_keep(ignored_match, to_keep, matches, filepart, hole, True) + if should_keep: + # pylint:disable=unpacking-non-sequence + try: + append, crop = should_keep + except TypeError: + append, crop = should_keep, should_keep + if append: + to_keep.append(ignored_match) + if crop: + hole.start = ignored_match.end + + for match in ignored_matches: + if self.should_remove(match, matches, filepart, hole, context): + to_remove.append(match) + for keep_match in to_keep: + if keep_match in to_remove: + to_remove.remove(keep_match) + + if hole and hole.value: + hole.name = self.match_name + hole.tags = self.match_tags + if self.alternative_match_name: + # Split and keep values that can be a title + titles = hole.split(title_seps, lambda m: m.value) + for title_match in list(titles[1:]): + previous_title = titles[titles.index(title_match) - 1] + separator = matches.input_string[previous_title.end:title_match.start] + if len(separator) == 1 and separator == '-' \ + and previous_title.raw[-1] not in seps \ + and title_match.raw[0] not in seps: + titles[titles.index(title_match) - 1].end = title_match.end + titles.remove(title_match) + else: + title_match.name = self.alternative_match_name + + else: + titles = [hole] + return titles, to_remove + + def when(self, matches, context): + ret = [] + to_remove = [] + + if matches.named(self.match_name, lambda match: 'expected' in match.tags): + return False + + fileparts = [filepart for filepart in list(marker_sorted(matches.markers.named('path'), matches)) + if not self.filepart_filter or self.filepart_filter(filepart, matches)] + + # Priorize fileparts containing the year + years_fileparts = [] + for filepart in fileparts: + year_match = matches.range(filepart.start, filepart.end, lambda match: 
match.name == 'year', 0) + if year_match: + years_fileparts.append(filepart) + + for filepart in fileparts: + try: + years_fileparts.remove(filepart) + except ValueError: + pass + titles = self.check_titles_in_filepart(filepart, matches, context) + if titles: + titles, to_remove_c = titles + ret.extend(titles) + to_remove.extend(to_remove_c) + break + + # Add title match in all fileparts containing the year. + for filepart in years_fileparts: + titles = self.check_titles_in_filepart(filepart, matches, context) + if titles: + # pylint:disable=unbalanced-tuple-unpacking + titles, to_remove_c = titles + ret.extend(titles) + to_remove.extend(to_remove_c) + + if ret or to_remove: + return ret, to_remove + return False + + +class TitleFromPosition(TitleBaseRule): + """ + Add title match in existing matches + """ + dependency = [FilmTitleRule, SubtitlePrefixLanguageRule, SubtitleSuffixLanguageRule, SubtitleExtensionRule] + + properties = {'title': [None], 'alternative_title': [None]} + + def __init__(self): + super(TitleFromPosition, self).__init__('title', ['title'], 'alternative_title') + + def enabled(self, context): + return not is_disabled(context, 'alternative_title') + + +class PreferTitleWithYear(Rule): + """ + Prefer title where filepart contains year. 
+ """ + dependency = TitleFromPosition + consequence = [RemoveMatch, AppendTags(['equivalent-ignore'])] + + properties = {'title': [None]} + + def when(self, matches, context): + with_year_in_group = [] + with_year = [] + titles = matches.named('title') + + for title_match in titles: + filepart = matches.markers.at_match(title_match, lambda marker: marker.name == 'path', 0) + if filepart: + year_match = matches.range(filepart.start, filepart.end, lambda match: match.name == 'year', 0) + if year_match: + group = matches.markers.at_match(year_match, lambda m: m.name == 'group') + if group: + with_year_in_group.append(title_match) + else: + with_year.append(title_match) + + to_tag = [] + if with_year_in_group: + title_values = {title_match.value for title_match in with_year_in_group} + to_tag.extend(with_year_in_group) + elif with_year: + title_values = {title_match.value for title_match in with_year} + to_tag.extend(with_year) + else: + title_values = {title_match.value for title_match in titles} + + to_remove = [] + for title_match in titles: + if title_match.value not in title_values: + to_remove.append(title_match) + if to_remove or to_tag: + return to_remove, to_tag + return False diff --git a/lib/guessit/rules/properties/type.py b/lib/guessit/rules/properties/type.py new file mode 100644 index 00000000..6a2877ef --- /dev/null +++ b/lib/guessit/rules/properties/type.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +type property +""" +from rebulk import CustomRule, Rebulk, POST_PROCESS +from rebulk.match import Match + +from ..common.pattern import is_disabled +from ...rules.processors import Processors + + +def _type(matches, value): + """ + Define type match with given value. + :param matches: + :param value: + :return: + """ + matches.append(Match(len(matches.input_string), len(matches.input_string), name='type', value=value)) + + +def type_(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. 
+ + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'type')) + rebulk = rebulk.rules(TypeProcessor) + + return rebulk + + +class TypeProcessor(CustomRule): + """ + Post processor to find file type based on all others found matches. + """ + priority = POST_PROCESS + + dependency = Processors + + properties = {'type': ['episode', 'movie']} + + def when(self, matches, context): # pylint:disable=too-many-return-statements + option_type = context.get('type', None) + if option_type: + return option_type + + episode = matches.named('episode') + season = matches.named('season') + absolute_episode = matches.named('absolute_episode') + episode_details = matches.named('episode_details') + + if episode or season or episode_details or absolute_episode: + return 'episode' + + film = matches.named('film') + if film: + return 'movie' + + year = matches.named('year') + date = matches.named('date') + + if date and not year: + return 'episode' + + bonus = matches.named('bonus') + if bonus and not year: + return 'episode' + + crc32 = matches.named('crc32') + anime_release_group = matches.named('release_group', lambda match: 'anime' in match.tags) + if crc32 and anime_release_group: + return 'episode' + + return 'movie' + + def then(self, matches, when_response, context): + _type(matches, when_response) diff --git a/lib/guessit/rules/properties/video_codec.py b/lib/guessit/rules/properties/video_codec.py new file mode 100644 index 00000000..842a03c7 --- /dev/null +++ b/lib/guessit/rules/properties/video_codec.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +video_codec and video_profile property +""" +from rebulk import Rebulk, Rule, RemoveMatch +from rebulk.remodule import re + +from ..common import dash +from ..common.pattern import is_disabled +from ..common.validators import seps_after, seps_before, seps_surround + + +def 
video_codec(config): # pylint:disable=unused-argument + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk() + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True) + rebulk.defaults(name="video_codec", + tags=['source-suffix', 'streaming_service.suffix'], + disabled=lambda context: is_disabled(context, 'video_codec')) + + rebulk.regex(r'Rv\d{2}', value='RealVideo') + rebulk.regex('Mpe?g-?2', '[hx]-?262', value='MPEG-2') + rebulk.string("DVDivX", "DivX", value="DivX") + rebulk.string('XviD', value='Xvid') + rebulk.regex('VC-?1', value='VC-1') + rebulk.string('VP7', value='VP7') + rebulk.string('VP8', 'VP80', value='VP8') + rebulk.string('VP9', value='VP9') + rebulk.regex('[hx]-?263', value='H.263') + rebulk.regex('[hx]-?264', '(MPEG-?4)?AVC(?:HD)?', value='H.264') + rebulk.regex('[hx]-?265', 'HEVC', value='H.265') + rebulk.regex('(?P<video_codec>hevc)(?P<color_depth>10)', value={'video_codec': 'H.265', 'color_depth': '10-bit'}, + tags=['video-codec-suffix'], children=True) + + # http://blog.mediacoderhq.com/h264-profiles-and-levels/ + # https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC + rebulk.defaults(clear=True, + name="video_profile", + validator=seps_surround, + disabled=lambda context: is_disabled(context, 'video_profile')) + + rebulk.string('BP', value='Baseline', tags='video_profile.rule') + rebulk.string('XP', 'EP', value='Extended', tags='video_profile.rule') + rebulk.string('MP', value='Main', tags='video_profile.rule') + rebulk.string('HP', 'HiP', value='High', tags='video_profile.rule') + + # https://en.wikipedia.org/wiki/Scalable_Video_Coding + rebulk.string('SC', 'SVC', value='Scalable Video Coding', tags='video_profile.rule') + # https://en.wikipedia.org/wiki/AVCHD + rebulk.regex('AVC(?:HD)?', value='Advanced Video Codec High Definition', tags='video_profile.rule') + # 
https://en.wikipedia.org/wiki/H.265/HEVC + rebulk.string('HEVC', value='High Efficiency Video Coding', tags='video_profile.rule') + + rebulk.regex('Hi422P', value='High 4:2:2') + rebulk.regex('Hi444PP', value='High 4:4:4 Predictive') + rebulk.regex('Hi10P?', value='High 10') # no profile validation is required + + rebulk.string('DXVA', value='DXVA', name='video_api', + disabled=lambda context: is_disabled(context, 'video_api')) + + rebulk.defaults(clear=True, + name='color_depth', + validator=seps_surround, + disabled=lambda context: is_disabled(context, 'color_depth')) + rebulk.regex('12.?bits?', value='12-bit') + rebulk.regex('10.?bits?', 'YUV420P10', 'Hi10P?', value='10-bit') + rebulk.regex('8.?bits?', value='8-bit') + + rebulk.rules(ValidateVideoCodec, VideoProfileRule) + + return rebulk + + +class ValidateVideoCodec(Rule): + """ + Validate video_codec with source property or separated + """ + priority = 64 + consequence = RemoveMatch + + def enabled(self, context): + return not is_disabled(context, 'video_codec') + + def when(self, matches, context): + ret = [] + for codec in matches.named('video_codec'): + if not seps_before(codec) and \ + not matches.at_index(codec.start - 1, lambda match: 'video-codec-prefix' in match.tags): + ret.append(codec) + continue + if not seps_after(codec) and \ + not matches.at_index(codec.end + 1, lambda match: 'video-codec-suffix' in match.tags): + ret.append(codec) + continue + return ret + + +class VideoProfileRule(Rule): + """ + Rule to validate video_profile + """ + consequence = RemoveMatch + + def enabled(self, context): + return not is_disabled(context, 'video_profile') + + def when(self, matches, context): + profile_list = matches.named('video_profile', lambda match: 'video_profile.rule' in match.tags) + ret = [] + for profile in profile_list: + codec = matches.at_span(profile.span, lambda match: match.name == 'video_codec', 0) + if not codec: + codec = matches.previous(profile, lambda match: match.name == 'video_codec') 
+ if not codec: + codec = matches.next(profile, lambda match: match.name == 'video_codec') + if not codec: + ret.append(profile) + return ret diff --git a/lib/guessit/rules/properties/website.py b/lib/guessit/rules/properties/website.py new file mode 100644 index 00000000..b01e86c8 --- /dev/null +++ b/lib/guessit/rules/properties/website.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Website property. +""" +# from pkg_resources import resource_stream # @UnresolvedImport +import os + +from rebulk.remodule import re + +from rebulk import Rebulk, Rule, RemoveMatch +from ..common import seps +from ..common.formatters import cleanup +from ..common.pattern import is_disabled +from ..common.validators import seps_surround +from ...reutils import build_or_pattern + + +def website(config): + """ + Builder for rebulk object. + + :param config: rule configuration + :type config: dict + :return: Created Rebulk object + :rtype: Rebulk + """ + rebulk = Rebulk(disabled=lambda context: is_disabled(context, 'website')) + rebulk = rebulk.regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True) + rebulk.defaults(name="website") + + with open(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'tlds-alpha-by-domain.txt')) as tld_file: + tlds = [ + tld.strip().decode('utf-8') + for tld in tld_file.readlines() + if b'--' not in tld + ][1:] # All registered domain extension + + safe_tlds = config['safe_tlds'] # For sure a website extension + safe_subdomains = config['safe_subdomains'] # For sure a website subdomain + safe_prefix = config['safe_prefixes'] # Those words before a tlds are sure + website_prefixes = config['prefixes'] + + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)+(?:[a-z-]+\.)+(?:'+build_or_pattern(tlds) + + r'))(?:[^a-z0-9]|$)', + children=True) + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_tlds) + + 
r'))(?:[^a-z0-9]|$)', + safe_subdomains=safe_subdomains, safe_tlds=safe_tlds, children=True) + rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) + + r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_prefix) + + r'\.)+(?:'+build_or_pattern(tlds) + + r'))(?:[^a-z0-9]|$)', + safe_subdomains=safe_subdomains, safe_prefix=safe_prefix, tlds=tlds, children=True) + + rebulk.string(*website_prefixes, + validator=seps_surround, private=True, tags=['website.prefix']) + + class PreferTitleOverWebsite(Rule): + """ + If found match is more likely a title, remove website. + """ + consequence = RemoveMatch + + @staticmethod + def valid_followers(match): + """ + Validator for next website matches + """ + return match.named('season', 'episode', 'year') + + def when(self, matches, context): + to_remove = [] + for website_match in matches.named('website'): + safe = False + for safe_start in safe_subdomains + safe_prefix: + if website_match.value.lower().startswith(safe_start): + safe = True + break + if not safe: + suffix = matches.next(website_match, PreferTitleOverWebsite.valid_followers, 0) + if suffix: + group = matches.markers.at_match(website_match, lambda marker: marker.name == 'group', 0) + if not group: + to_remove.append(website_match) + return to_remove + + rebulk.rules(PreferTitleOverWebsite, ValidateWebsitePrefix) + + return rebulk + + +class ValidateWebsitePrefix(Rule): + """ + Validate website prefixes + """ + priority = 64 + consequence = RemoveMatch + + def when(self, matches, context): + to_remove = [] + for prefix in matches.tagged('website.prefix'): + website_match = matches.next(prefix, predicate=lambda match: match.name == 'website', index=0) + if (not website_match or + matches.holes(prefix.end, website_match.start, + formatter=cleanup, seps=seps, predicate=lambda match: match.value)): + to_remove.append(prefix) + return to_remove diff --git a/lib/guessit/test/__init__.py b/lib/guessit/test/__init__.py new file mode 100644 index 00000000..e5be370e --- 
/dev/null +++ b/lib/guessit/test/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name diff --git a/lib/guessit/test/config/dummy.txt b/lib/guessit/test/config/dummy.txt new file mode 100644 index 00000000..7d6ca31b --- /dev/null +++ b/lib/guessit/test/config/dummy.txt @@ -0,0 +1 @@ +Not a configuration file \ No newline at end of file diff --git a/lib/guessit/test/config/test.json b/lib/guessit/test/config/test.json new file mode 100644 index 00000000..22f45d2a --- /dev/null +++ b/lib/guessit/test/config/test.json @@ -0,0 +1,4 @@ +{ + "expected_title": ["The 100", "OSS 117"], + "yaml": false +} diff --git a/lib/guessit/test/config/test.yaml b/lib/guessit/test/config/test.yaml new file mode 100644 index 00000000..6a4dfe15 --- /dev/null +++ b/lib/guessit/test/config/test.yaml @@ -0,0 +1,4 @@ +expected_title: + - The 100 + - OSS 117 +yaml: True diff --git a/lib/guessit/test/config/test.yml b/lib/guessit/test/config/test.yml new file mode 100644 index 00000000..6a4dfe15 --- /dev/null +++ b/lib/guessit/test/config/test.yml @@ -0,0 +1,4 @@ +expected_title: + - The 100 + - OSS 117 +yaml: True diff --git a/lib/guessit/test/enable_disable_properties.yml b/lib/guessit/test/enable_disable_properties.yml new file mode 100644 index 00000000..ada9c347 --- /dev/null +++ b/lib/guessit/test/enable_disable_properties.yml @@ -0,0 +1,335 @@ +? vorbis +: options: --exclude audio_codec + -audio_codec: Vorbis + +? DTS-ES +: options: --exclude audio_profile + audio_codec: DTS + -audio_profile: Extended Surround + +? DTS.ES +: options: --include audio_codec + audio_codec: DTS + -audio_profile: Extended Surround + +? 5.1 +? 5ch +? 6ch +: options: --exclude audio_channels + -audio_channels: '5.1' + +? Movie Title-x01-Other Title.mkv +? Movie Title-x01-Other Title +? directory/Movie Title-x01-Other Title/file.mkv +: options: --exclude bonus + -bonus: 1 + -bonus_title: Other Title + +? 
Title-x02-Bonus Title.mkv +: options: --include bonus + bonus: 2 + -bonus_title: Other Title + +? cd 1of3 +: options: --exclude cd + -cd: 1 + -cd_count: 3 + +? This.is.Us +: options: --exclude country + title: This is Us + -country: US + +? 2015.01.31 +: options: --exclude date + year: 2015 + -date: 2015-01-31 + +? Something 2 mar 2013) +: options: --exclude date + -date: 2013-03-02 + +? 2012 2009 S01E02 2015 # If no year is marked, the second one is guessed. +: options: --exclude year + -year: 2009 + +? Director's cut +: options: --exclude edition + -edition: Director's Cut + +? 2x5 +? 2X5 +? 02x05 +? 2X05 +? 02x5 +? S02E05 +? s02e05 +? s02e5 +? s2e05 +? s02ep05 +? s2EP5 +: options: --exclude season + -season: 2 + -episode: 5 + +? 2x6 +? 2X6 +? 02x06 +? 2X06 +? 02x6 +? S02E06 +? s02e06 +? s02e6 +? s2e06 +? s02ep06 +? s2EP6 +: options: --exclude episode + -season: 2 + -episode: 6 + +? serie Season 2 other +: options: --exclude season + -season: 2 + +? Some Dummy Directory/S02 Some Series/E01-Episode title.mkv +: options: --exclude episode_title + -episode_title: Episode title + season: 2 + episode: 1 + +? Another Dummy Directory/S02 Some Series/E01-Episode title.mkv +: options: --include season --include episode + -episode_title: Episode title + season: 2 + episode: 1 + +# pattern contains season and episode: it wont work enabling only one +? Some Series S03E01E02 +: options: --include episode + -season: 3 + -episode: [1, 2] + +# pattern contains season and episode: it wont work enabling only one +? Another Series S04E01E02 +: options: --include season + -season: 4 + -episode: [1, 2] + +? Show.Name.Season.4.Episode.1 +: options: --include episode + -season: 4 + episode: 1 + +? Another.Show.Name.Season.4.Episode.1 +: options: --include season + season: 4 + -episode: 1 + +? Some Series S01 02 03 +: options: --exclude season + -season: [1, 2, 3] + +? Some Series E01 02 04 +: options: --exclude episode + -episode: [1, 2, 4] + +? 
A very special episode s06 special +: options: -t episode --exclude episode_details + season: 6 + -episode_details: Special + +? S01D02.3-5-GROUP +: options: --exclude disc + -season: 1 + -disc: [2, 3, 4, 5] + -episode: [2, 3, 4, 5] + +? S01D02&4-6&8 +: options: --exclude season + -season: 1 + -disc: [2, 4, 5, 6, 8] + -episode: [2, 4, 5, 6, 8] + +? Film Title-f01-Series Title.mkv +: options: --exclude film + -film: 1 + -film_title: Film Title + +? Another Film Title-f01-Series Title.mkv +: options: --exclude film_title + film: 1 + -film_title: Film Title + +? English +? .ENG. +: options: --exclude language + -language: English + +? SubFrench +? SubFr +? STFr +: options: --exclude subtitle_language + -language: French + -subtitle_language: French + +? ST.FR +: options: --exclude subtitle_language + language: French + -subtitle_language: French + +? ENG.-.sub.FR +? ENG.-.FR Sub +: options: --include language + language: [English, French] + -subtitle_language: French + +? ENG.-.SubFR +: options: --include language + language: English + -subtitle_language: French + +? ENG.-.FRSUB +? ENG.-.FRSUBS +? ENG.-.FR-SUBS +: options: --include subtitle_language + -language: English + subtitle_language: French + +? DVD.Real.XViD +? DVD.fix.XViD +: options: --exclude other + -other: Fix + -proper_count: 1 + +? Part 3 +? Part III +? Part Three +? Part Trois +? Part3 +: options: --exclude part + -part: 3 + +? Some.Title.XViD-by.Artik[SEDG].avi +: options: --exclude release_group + -release_group: Artik[SEDG] + +? "[ABC] Some.Title.avi" +? some/folder/[ABC]Some.Title.avi +: options: --exclude release_group + -release_group: ABC + +? 360p +? 360px +? "360" +? +500x360 +: options: --exclude screen_size + -screen_size: 360p + +? 640x360 +: options: --exclude aspect_ratio + screen_size: 360p + -aspect_ratio: 1.778 + +? 8196x4320 +: options: --exclude screen_size + -screen_size: 4320p + -aspect_ratio: 1.897 + +? 4.3gb +: options: --exclude size + -size: 4.3GB + +? VhS_rip +? 
VHS.RIP +: options: --exclude source + -source: VHS + -other: Rip + +? DVD.RIP +: options: --include other + -source: DVD + -other: Rip + +? Title Only.avi +: options: --exclude title + -title: Title Only + +? h265 +? x265 +? h.265 +? x.265 +? hevc +: options: --exclude video_codec + -video_codec: H.265 + +? hevc10 +: options: --include color_depth + -video_codec: H.265 + -color_depth: 10-bit + +? HEVC-YUV420P10 +: options: --include color_depth + -video_codec: H.265 + color_depth: 10-bit + +? h265-HP +: options: --exclude video_profile + video_codec: H.265 + -video_profile: High + +? House.of.Cards.2013.S02E03.1080p.NF.WEBRip.DD5.1.x264-NTb.mkv +? House.of.Cards.2013.S02E03.1080p.Netflix.WEBRip.DD5.1.x264-NTb.mkv +: options: --exclude streaming_service + -streaming_service: Netflix + +? wawa.co.uk +: options: --exclude website + -website: wawa.co.uk + +? movie.mp4 +: options: --exclude mimetype + -mimetype: video/mp4 + +? another movie.mkv +: options: --exclude container + -container: mkv + +? series s02e01 +: options: --exclude type + -type: episode + +? series s02e01 +: options: --exclude type + -type: episode + +? Hotel.Hell.S01E01.720p.DD5.1.448kbps-ALANiS +: options: --exclude audio_bit_rate + -audio_bit_rate: 448Kbps + +? Katy Perry - Pepsi & Billboard Summer Beats Concert Series 2012 1080i HDTV 20 Mbps DD2.0 MPEG2-TrollHD.ts +: options: --exclude video_bit_rate + -video_bit_rate: 20Mbps + +? "[Figmentos] Monster 34 - At the End of Darkness [781219F1].mkv" +: options: --exclude crc32 + -crc32: 781219F1 + +? 1080p25 +: options: --exclude frame_rate + screen_size: 1080p + -frame_rate: 25fps + +? 1080p25 +: options: --exclude screen_size + -screen_size: 1080p + -frame_rate: 25fps + +? 1080p25 +: options: --include frame_rate + -screen_size: 1080p + -frame_rate: 25fps + +? 
1080p 30fps +: options: --exclude screen_size + -screen_size: 1080p + frame_rate: 30fps diff --git a/lib/guessit/test/episodes.yml b/lib/guessit/test/episodes.yml new file mode 100644 index 00000000..4bbbde4a --- /dev/null +++ b/lib/guessit/test/episodes.yml @@ -0,0 +1,4693 @@ +? __default__ +: type: episode + +? Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.avi +: title: Californication + season: 2 + episode: 5 + episode_title: Vaginatown + source: HDTV + video_codec: Xvid + release_group: 0TV + container: avi + +? Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi +: title: Dexter + season: 5 + episode: 2 + episode_title: Hello, Bandit + language: English + subtitle_language: French + source: HDTV + video_codec: Xvid + release_group: AlFleNi-TeaM + website: tvu.org.ru + container: avi + +? Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.avi +: title: Treme + season: 1 + episode: 3 + episode_title: Right Place, Wrong Time + source: HDTV + video_codec: Xvid + release_group: NoTV + +? Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi +: title: Duckman + season: 1 + episode: 13 + episode_title: Joking The Chicken + +? Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.avi +: title: The Simpsons + season: 12 + episode: 8 + episode_title: A Bas Le Sergent Skinner + language: French + +? Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi +: title: Duckman + season: 1 + episode: 1 + episode_title: I, Duckman + date: 2002-11-07 + +? Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.avi +: title: The Simpsons + season: 12 + episode: 8 + episode_title: A Bas Le Sergent Skinner + language: French + +? Series/Futurama/Season 3 (mkv)/[™] Futurama - S03E22 - Le chef de fer à 30% ( 30 Percent Iron Chef ).mkv +: title: Futurama + season: 3 + episode: 22 + episode_title: Le chef de fer à 30% + +? 
Series/The Office/Season 6/The Office - S06xE01.avi +: title: The Office + season: 6 + episode: 1 + +? series/The Office/Season 4/The Office [401] Fun Run.avi +: title: The Office + season: 4 + episode: 1 + episode_title: Fun Run + +? Series/Mad Men Season 1 Complete/Mad.Men.S01E01.avi +: title: Mad Men + season: 1 + episode: 1 + other: Complete + +? series/Psych/Psych S02 Season 2 Complete English DVD/Psych.S02E02.65.Million.Years.Off.avi +: title: Psych + season: 2 + episode: 2 + episode_title: 65 Million Years Off + language: english + source: DVD + other: Complete + +? series/Psych/Psych S02 Season 2 Complete English DVD/Psych.S02E03.Psy.Vs.Psy.Français.srt +: title: Psych + season: 2 + episode: 3 + episode_title: Psy Vs Psy + source: DVD + language: English + subtitle_language: French + other: Complete + +? Series/Pure Laine/Pure.Laine.1x01.Toutes.Couleurs.Unies.FR.(Québec).DVB-Kceb.[tvu.org.ru].avi +: title: Pure Laine + season: 1 + episode: 1 + episode_title: Toutes Couleurs Unies + source: Digital TV + release_group: Kceb + language: french + website: tvu.org.ru + +? Series/Pure Laine/2x05 - Pure Laine - Je Me Souviens.avi +: title: Pure Laine + season: 2 + episode: 5 + episode_title: Je Me Souviens + +? Series/Tout sur moi/Tout sur moi - S02E02 - Ménage à trois (14-01-2008) [Rip by Ampli].avi +: title: Tout sur moi + season: 2 + episode: 2 + episode_title: Ménage à trois + date: 2008-01-14 + +? The.Mentalist.2x21.18-5-4.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi +: title: The Mentalist + season: 2 + episode: 21 + episode_title: 18-5-4 + language: english + subtitle_language: french + source: HDTV + video_codec: Xvid + release_group: AlFleNi-TeaM + website: tvu.org.ru + +? series/__ Incomplete __/Dr Slump (Catalan)/Dr._Slump_-_003_DVB-Rip_Catalan_by_kelf.avi +: title: Dr Slump + episode: 3 + source: Digital TV + other: Rip + language: catalan + +# Disabling this test because it just doesn't looks like a serie ... +#? 
series/Ren and Stimpy - Black_hole_[DivX].avi +#: title: Ren and Stimpy +# episode_title: Black hole +# video_codec: DivX + +# Disabling this test because it just doesn't looks like a serie ... +# ? Series/Walt Disney/Donald.Duck.-.Good.Scouts.[www.bigernie.jump.to].avi +#: title: Donald Duck +# episode_title: Good Scouts +# website: www.bigernie.jump.to + +? Series/Neverwhere/Neverwhere.05.Down.Street.[tvu.org.ru].avi +: title: Neverwhere + episode: 5 + episode_title: Down Street + website: tvu.org.ru + +? Series/South Park/Season 4/South.Park.4x07.Cherokee.Hair.Tampons.DVDRip.[tvu.org.ru].avi +: title: South Park + season: 4 + episode: 7 + episode_title: Cherokee Hair Tampons + source: DVD + other: Rip + website: tvu.org.ru + +? Series/Kaamelott/Kaamelott - Livre V - Ep 23 - Le Forfait.avi +: title: Kaamelott + alternative_title: Livre V + episode: 23 + episode_title: Le Forfait + +? Series/Duckman/Duckman - 110 (10) - 20021218 - Cellar Beware.avi +: title: Duckman + season: 1 + episode: 10 + date: 2002-12-18 + episode_title: Cellar Beware + +# Removing this test because it doesn't look like a series +# ? Series/Ren & Stimpy/Ren And Stimpy - Onward & Upward-Adult Party Cartoon.avi +# : title: Ren And Stimpy +# episode_title: Onward & Upward-Adult Party Cartoon + +? Series/Breaking Bad/Minisodes/Breaking.Bad.(Minisodes).01.Good.Cop.Bad.Cop.WEBRip.XviD.avi +: title: Breaking Bad + episode_format: Minisode + episode: 1 + episode_title: Good Cop Bad Cop + source: Web + other: Rip + video_codec: Xvid + +? Series/My Name Is Earl/My.Name.Is.Earl.S01Extras.-.Bad.Karma.DVDRip.XviD.avi +: title: My Name Is Earl + season: 1 + episode_title: Bad Karma + source: DVD + other: [Extras, Rip] + video_codec: Xvid + +? 
series/Freaks And Geeks/Season 1/Episode 4 - Kim Kelly Is My Friend-eng(1).srt +: title: Freaks And Geeks + season: 1 + episode: 4 + episode_title: Kim Kelly Is My Friend + subtitle_language: English # This is really a subtitle_language, despite guessit 1.x assert for language. + +? /mnt/series/The Big Bang Theory/S01/The.Big.Bang.Theory.S01E01.mkv +: title: The Big Bang Theory + season: 1 + episode: 1 + +? /media/Parks_and_Recreation-s03-e01.mkv +: title: Parks and Recreation + season: 3 + episode: 1 + +? /media/Parks_and_Recreation-s03-e02-Flu_Season.mkv +: title: Parks and Recreation + season: 3 + episode_title: Flu Season + episode: 2 + +? /media/Parks_and_Recreation-s03-x01.mkv +: title: Parks and Recreation + season: 3 + episode: 1 + +? /media/Parks_and_Recreation-s03-x02-Gag_Reel.mkv +: title: Parks and Recreation + season: 3 + episode: 2 + episode_title: Gag Reel + +? /media/Band_of_Brothers-e01-Currahee.mkv +: title: Band of Brothers + episode: 1 + episode_title: Currahee + +? /media/Band_of_Brothers-x02-We_Stand_Alone_Together.mkv +: title: Band of Brothers + bonus: 2 + bonus_title: We Stand Alone Together + +? /TV Shows/Mad.M-5x9.mkv +: title: Mad M + season: 5 + episode: 9 + +? /TV Shows/new.girl.117.hdtv-lol.mp4 +: title: new girl + season: 1 + episode: 17 + source: HDTV + release_group: lol + +? Kaamelott - 5x44x45x46x47x48x49x50.avi +: title: Kaamelott + season: 5 + episode: [44, 45, 46, 47, 48, 49, 50] + +? Example S01E01-02.avi +? Example S01E01E02.avi +: title: Example + season: 1 + episode: [1, 2] + +? Series/Baccano!/Baccano!_-_T1_-_Trailer_-_[Ayu](dae8173e).mkv +: title: Baccano! + other: Trailer + release_group: Ayu + episode_title: T1 + crc32: dae8173e + +? Series/Doctor Who (2005)/Season 06/Doctor Who (2005) - S06E01 - The Impossible Astronaut (1).avi +: title: Doctor Who + year: 2005 + season: 6 + episode: 1 + episode_title: The Impossible Astronaut + +? 
The Sopranos - [05x07] - In Camelot.mp4 +: title: The Sopranos + season: 5 + episode: 7 + episode_title: In Camelot + +? The.Office.(US).1x03.Health.Care.HDTV.XviD-LOL.avi +: title: The Office + country: US + season: 1 + episode: 3 + episode_title: Health Care + source: HDTV + video_codec: Xvid + release_group: LOL + +? /Volumes/data-1/Series/Futurama/Season 3/Futurama_-_S03_DVD_Bonus_-_Deleted_Scenes_Part_3.ogm +: title: Futurama + season: 3 + part: 3 + source: DVD + other: Bonus + +? Ben.and.Kate.S01E02.720p.HDTV.X264-DIMENSION.mkv +: title: Ben and Kate + season: 1 + episode: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DIMENSION + +? /volume1/TV Series/Drawn Together/Season 1/Drawn Together 1x04 Requiem for a Reality Show.avi +: title: Drawn Together + season: 1 + episode: 4 + episode_title: Requiem for a Reality Show + +? Sons.of.Anarchy.S05E06.720p.WEB.DL.DD5.1.H.264-CtrlHD.mkv +: title: Sons of Anarchy + season: 5 + episode: 6 + screen_size: 720p + source: Web + audio_channels: "5.1" + audio_codec: Dolby Digital + video_codec: H.264 + release_group: CtrlHD + +? /media/bdc64bfe-e36f-4af8-b550-e6fd2dfaa507/TV_Shows/Doctor Who (2005)/Saison 6/Doctor Who (2005) - S06E13 - The Wedding of River Song.mkv +: title: Doctor Who + season: 6 + episode: 13 + year: 2005 + episode_title: The Wedding of River Song + uuid: bdc64bfe-e36f-4af8-b550-e6fd2dfaa507 + +? /mnt/videos/tvshows/Doctor Who/Season 06/E13 - The Wedding of River Song.mkv +: title: Doctor Who + season: 6 + episode: 13 + episode_title: The Wedding of River Song + +? The.Simpsons.S24E03.Adventures.in.Baby-Getting.720p.WEB-DL.DD5.1.H.264-CtrlHD.mkv +: title: The Simpsons + season: 24 + episode: 3 + episode_title: Adventures in Baby-Getting + screen_size: 720p + source: Web + audio_channels: "5.1" + audio_codec: Dolby Digital + video_codec: H.264 + release_group: CtrlHD + +? 
/home/disaster/Videos/TV/Merlin/merlin_2008.5x02.arthurs_bane_part_two.repack.720p_hdtv_x264-fov.mkv +: title: merlin + season: 5 + episode: 2 + part: 2 + episode_title: arthurs bane + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: fov + year: 2008 + other: Proper + proper_count: 1 + +? "Da Vinci's Demons - 1x04 - The Magician.mkv" +: title: "Da Vinci's Demons" + season: 1 + episode: 4 + episode_title: The Magician + +? CSI.S013E18.Sheltered.720p.WEB-DL.DD5.1.H.264.mkv +: title: CSI + season: 13 + episode: 18 + episode_title: Sheltered + screen_size: 720p + source: Web + audio_channels: "5.1" + audio_codec: Dolby Digital + video_codec: H.264 + +? Game of Thrones S03E06 1080i HDTV DD5.1 MPEG2-TrollHD.ts +: title: Game of Thrones + season: 3 + episode: 6 + screen_size: 1080i + source: HDTV + audio_channels: "5.1" + audio_codec: Dolby Digital + video_codec: MPEG-2 + release_group: TrollHD + +? gossip.girl.s01e18.hdtv.xvid-2hd.eng.srt +: title: gossip girl + season: 1 + episode: 18 + source: HDTV + video_codec: Xvid + release_group: 2hd + subtitle_language: english + +? Wheels.S03E01E02.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2] + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: IMMERSE + +? Wheels.S03E01-02.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2] + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: IMMERSE + +? Wheels.S03E01-E02.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2] + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: IMMERSE + +? Wheels.S03E01-04.720p.HDTV.x264-IMMERSE.mkv +: title: Wheels + season: 3 + episode: [1, 2, 3, 4] + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: IMMERSE + +? 
Marvels.Agents.of.S.H.I.E.L.D-S01E06.720p.HDTV.X264-DIMENSION.mkv +: title: Marvels Agents of S.H.I.E.L.D + season: 1 + episode: 6 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DIMENSION + +? Marvels.Agents.of.S.H.I.E.L.D.S01E06.720p.HDTV.X264-DIMENSION.mkv +: title: Marvels Agents of S.H.I.E.L.D. + season: 1 + episode: 6 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DIMENSION + +? Marvels.Agents.of.S.H.I.E.L.D..S01E06.720p.HDTV.X264-DIMENSION.mkv +: title: Marvels Agents of S.H.I.E.L.D. + season: 1 + episode: 6 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DIMENSION + +? Series/Friday Night Lights/Season 1/Friday Night Lights S01E19 - Ch-Ch-Ch-Ch-Changes.avi +: title: Friday Night Lights + season: 1 + episode: 19 + episode_title: Ch-Ch-Ch-Ch-Changes + +? Dexter Saison VII FRENCH.BDRip.XviD-MiND.nfo +: title: Dexter + season: 7 + video_codec: Xvid + language: French + source: Blu-ray + other: Rip + release_group: MiND + +? Dexter Saison sept FRENCH.BDRip.XviD-MiND.nfo +: title: Dexter + season: 7 + video_codec: Xvid + language: French + source: Blu-ray + other: Rip + release_group: MiND + +? "Pokémon S16 - E29 - 1280*720 HDTV VF.mkv" +: title: Pokémon + source: HDTV + language: French + season: 16 + episode: 29 + screen_size: 720p + +? One.Piece.E576.VOSTFR.720p.HDTV.x264-MARINE-FORD.mkv +: episode: 576 + video_codec: H.264 + source: HDTV + title: One Piece + release_group: MARINE-FORD + subtitle_language: French + screen_size: 720p + +? Dexter.S08E12.FINAL.MULTi.1080p.BluRay.x264-MiND.mkv +: video_codec: H.264 + episode: 12 + season: 8 + source: Blu-ray + title: Dexter + episode_details: Final + language: Multiple languages + release_group: MiND + screen_size: 1080p + +? One Piece - E623 VOSTFR HD [www.manga-ddl-free.com].mkv +: website: www.manga-ddl-free.com + episode: 623 + subtitle_language: French + title: One Piece + other: HD + +? 
Falling Skies Saison 1.HDLight.720p.x264.VFF.mkv +: language: French + screen_size: 720p + season: 1 + title: Falling Skies + video_codec: H.264 + other: Micro HD + +? Sleepy.Hollow.S01E09.720p.WEB-DL.DD5.1.H.264-BP.mkv +: episode: 9 + video_codec: H.264 + source: Web + title: Sleepy Hollow + audio_channels: "5.1" + screen_size: 720p + season: 1 +# video_profile: BP # TODO: related to https://github.com/guessit-io/guessit/issues/458#issuecomment-305719715 + audio_codec: Dolby Digital + +? Sleepy.Hollow.S01E09.720p.WEB-DL.DD5.1.H.264-BS.mkv +: episode: 9 + video_codec: H.264 + source: Web + title: Sleepy Hollow + audio_channels: "5.1" + screen_size: 720p + season: 1 + release_group: BS + audio_codec: Dolby Digital + +? Battlestar.Galactica.S00.Pilot.FRENCH.DVDRip.XviD-NOTAG.avi +: title: Battlestar Galactica + season: 0 + episode_details: Pilot + episode_title: Pilot + language: French + source: DVD + other: Rip + video_codec: Xvid + release_group: NOTAG + +? The Big Bang Theory S00E00 Unaired Pilot VOSTFR TVRip XviD-VioCs +: title: The Big Bang Theory + season: 0 + episode: 0 + subtitle_language: French + source: TV + other: Rip + video_codec: Xvid + release_group: VioCs + episode_details: [Unaired, Pilot] + +? The Big Bang Theory S01E00 PROPER Unaired Pilot TVRip XviD-GIGGITY +: title: The Big Bang Theory + season: 1 + episode: 0 + source: TV + video_codec: Xvid + release_group: GIGGITY + other: [Proper, Rip] + proper_count: 1 + episode_details: [Unaired, Pilot] + +? Pawn.Stars.S2014E18.720p.HDTV.x264-KILLERS +: title: Pawn Stars + season: 2014 + year: 2014 + episode: 18 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: KILLERS + +? 2.Broke.Girls.S03E10.480p.HDTV.x264-mSD.mkv +: title: 2 Broke Girls + season: 3 + episode: 10 + screen_size: 480p + source: HDTV + video_codec: H.264 + release_group: mSD + +? the.100.109.hdtv-lol.mp4 +: title: the 100 + season: 1 + episode: 9 + source: HDTV + release_group: lol + +? 
Criminal.Minds.5x03.Reckoner.ENG.-.sub.FR.HDTV.XviD-STi.[tvu.org.ru].avi +: title: Criminal Minds + language: English + subtitle_language: French + season: 5 + episode: 3 + video_codec: Xvid + source: HDTV + website: tvu.org.ru + release_group: STi + episode_title: Reckoner + +? 03-Criminal.Minds.avi +: title: Criminal Minds + episode: 3 + +? '[Evil-Saizen]_Laughing_Salesman_14_[DVD][1C98686A].mkv' +: crc32: 1C98686A + episode: 14 + source: DVD + release_group: Evil-Saizen + title: Laughing Salesman + +? '[Kaylith] Zankyou no Terror - 04 [480p][B4D4514E].mp4' +: crc32: B4D4514E + episode: 4 + release_group: Kaylith + screen_size: 480p + title: Zankyou no Terror + +? '[PuyaSubs!] Seirei Tsukai no Blade Dance - 05 [720p][32DD560E].mkv' +: crc32: 32DD560E + episode: 5 + release_group: PuyaSubs! + screen_size: 720p + title: Seirei Tsukai no Blade Dance + +? '[Doremi].Happiness.Charge.Precure.27.[1280x720].[DC91581A].mkv' +: crc32: DC91581A + episode: 27 + release_group: Doremi + screen_size: 720p + title: Happiness Charge Precure + +? "[Daisei] Free!:Iwatobi Swim Club - 01 ~ (BD 720p 10-bit AAC) [99E8E009].mkv" +: audio_codec: AAC + crc32: 99E8E009 + episode: 1 + source: Blu-ray + release_group: Daisei + screen_size: 720p + title: Free!:Iwatobi Swim Club + color_depth: 10-bit + +? '[Tsundere] Boku wa Tomodachi ga Sukunai - 03 [BDRip h264 1920x1080 10bit FLAC][AF0C22CC].mkv' +: audio_codec: FLAC + crc32: AF0C22CC + episode: 3 + source: Blu-ray + release_group: Tsundere + screen_size: 1080p + title: Boku wa Tomodachi ga Sukunai + video_codec: H.264 + color_depth: 10-bit + +? '[t.3.3.d]_Mikakunin_de_Shinkoukei_-_12_[720p][5DDC1352].mkv' +: crc32: 5DDC1352 + episode: 12 + screen_size: 720p + title: Mikakunin de Shinkoukei + release_group: t.3.3.d + +? '[Anime-Koi] Sabagebu! - 06 [h264-720p][ABB3728A].mkv' +: crc32: ABB3728A + episode: 6 + release_group: Anime-Koi + screen_size: 720p + title: Sabagebu! + video_codec: H.264 + +? 
'[aprm-Diogo4D] [BD][1080p] Nagi no Asukara 08 [4D102B7C].mkv' +: crc32: 4D102B7C + episode: 8 + source: Blu-ray + release_group: aprm-Diogo4D + screen_size: 1080p + title: Nagi no Asukara + +? '[Akindo-SSK] Zankyou no Terror - 05 [720P][Sub_ITA][F5CCE87C].mkv' +: crc32: F5CCE87C + episode: 5 + release_group: Akindo-SSK + screen_size: 720p + title: Zankyou no Terror + subtitle_language: it + +? Naruto Shippuden Episode 366 VOSTFR.avi +: episode: 366 + title: Naruto Shippuden + subtitle_language: fr + +? Naruto Shippuden Episode 366v2 VOSTFR.avi +: episode: 366 + version: 2 + title: Naruto Shippuden + subtitle_language: fr + +? '[HorribleSubs] Ao Haru Ride - 06 [480p].mkv' +: episode: 6 + release_group: HorribleSubs + screen_size: 480p + title: Ao Haru Ride + +? '[DeadFish] Tari Tari - 01 [BD][720p][AAC].mp4' +: audio_codec: AAC + episode: 1 + source: Blu-ray + release_group: DeadFish + screen_size: 720p + title: Tari Tari + +? '[NoobSubs] Sword Art Online II 06 (720p 8bit AAC).mp4' +: audio_codec: AAC + episode: 6 + release_group: NoobSubs + screen_size: 720p + title: Sword Art Online II + color_depth: 8-bit + +? '[DeadFish] 01 - Tari Tari [BD][720p][AAC].mp4' +: audio_codec: AAC + episode: 1 + source: Blu-ray + release_group: DeadFish + screen_size: 720p + title: Tari Tari + +? '[NoobSubs] 06 Sword Art Online II (720p 8bit AAC).mp4' +: audio_codec: AAC + episode: 6 + release_group: NoobSubs + screen_size: 720p + title: Sword Art Online II + color_depth: 8-bit + +? '[DeadFish] 12 - Tari Tari [BD][720p][AAC].mp4' +: audio_codec: AAC + episode: 12 + source: Blu-ray + release_group: DeadFish + screen_size: 720p + title: Tari Tari + +? Something.Season.2.1of4.Ep.Title.HDTV.torrent +: episode_count: 4 + episode: 1 + source: HDTV + season: 2 + title: Something + episode_title: Title + container: torrent + +? 
Something.Season.2of5.3of9.Ep.Title.HDTV.torrent +: episode_count: 9 + episode: 3 + source: HDTV + season: 2 + season_count: 5 + title: Something + episode_title: Title + container: torrent + +? Something.Other.Season.3of5.Complete.HDTV.torrent +: source: HDTV + other: Complete + season: 3 + season_count: 5 + title: Something Other + container: torrent + +? Something.Other.Season.1-3.avi +: season: [1, 2, 3] + title: Something Other + +? Something.Other.Season.1&3.avi +: season: [1, 3] + title: Something Other + +? Something.Other.Season.1&3-1to12ep.avi +: season: [1, 3] + title: Something Other + +? W2Test.123.HDTV.XViD-FlexGet +: episode: 23 + season: 1 + source: HDTV + release_group: FlexGet + title: W2Test + video_codec: Xvid + +? W2Test.123.HDTV.XViD-FlexGet +: options: --episode-prefer-number + episode: 123 + source: HDTV + release_group: FlexGet + title: W2Test + video_codec: Xvid + +? FooBar.0307.PDTV-FlexGet +: episode: 7 + source: Digital TV + release_group: FlexGet + season: 3 + title: FooBar + +? FooBar.0307.PDTV-FlexGet +? FooBar.307.PDTV-FlexGet +: options: --episode-prefer-number + episode: 307 + source: Digital TV + release_group: FlexGet + title: FooBar + +? FooBar.07.PDTV-FlexGet +: episode: 7 + source: Digital TV + release_group: FlexGet + title: FooBar + +? FooBar.7.PDTV-FlexGet +: episode: 7 + source: Digital TV + release_group: FlexGet + title: FooBar + +? FooBar.0307.PDTV-FlexGet +: episode: 7 + source: Digital TV + release_group: FlexGet + season: 3 + title: FooBar + +? FooBar.307.PDTV-FlexGet +: episode: 7 + source: Digital TV + release_group: FlexGet + season: 3 + title: FooBar + +? FooBar.07.PDTV-FlexGet +: episode: 7 + source: Digital TV + release_group: FlexGet + title: FooBar + +? FooBar.07v4.PDTV-FlexGet +: episode: 7 + version: 4 + source: Digital TV + release_group: FlexGet + title: FooBar + +? FooBar.7.PDTV-FlexGet +: source: Digital TV + release_group: FlexGet + title: FooBar 7 + type: movie + +? 
FooBar.7.PDTV-FlexGet +: options: -t episode + episode: 7 + source: Digital TV + release_group: FlexGet + title: FooBar + +? FooBar.7v3.PDTV-FlexGet +: options: -t episode + episode: 7 + version: 3 + source: Digital TV + release_group: FlexGet + title: FooBar + +? Test.S02E01.hdtv.real.proper +: episode: 1 + source: HDTV + other: Proper + proper_count: 2 + season: 2 + title: Test + +? Real.Test.S02E01.hdtv.proper +: episode: 1 + source: HDTV + other: Proper + proper_count: 1 + season: 2 + title: Real Test + +? Test.Real.S02E01.hdtv.proper +: episode: 1 + source: HDTV + other: Proper + proper_count: 1 + season: 2 + title: Test Real + +? Test.S02E01.hdtv.proper +: episode: 1 + source: HDTV + other: Proper + proper_count: 1 + season: 2 + title: Test + +? Test.S02E01.hdtv.real.repack.proper +: episode: 1 + source: HDTV + other: Proper + proper_count: 3 + season: 2 + title: Test + +? Date.Show.03-29-2012.HDTV.XViD-FlexGet +: date: 2012-03-29 + source: HDTV + release_group: FlexGet + title: Date Show + video_codec: Xvid + +? Something.1x5.Season.Complete-FlexGet +: episode: 5 + other: Complete + season: 1 + title: Something + release_group: FlexGet + +? Something Seasons 1 & 2 - Complete +: other: Complete + season: + - 1 + - 2 + title: Something + +? Something Seasons 4 Complete +: other: Complete + season: 4 + title: Something + +? Something.1xAll.Season.Complete-FlexGet +: other: Complete + season: 1 + title: Something + release_group: FlexGet + +? Something.1xAll-FlexGet +: other: Complete + season: 1 + title: Something + release_group: FlexGet + +? FlexGet.US.S2013E14.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP +: audio_channels: '5.1' + audio_codec: AAC + country: US + episode: 14 + source: HDTV + release_group: NOGRP + screen_size: 720p + season: 2013 + title: FlexGet + episode_title: Title Here + video_codec: H.264 + year: 2013 + +? 
FlexGet.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP +: audio_channels: '5.1' + audio_codec: AAC + episode_count: 21 + episode: 14 + source: HDTV + release_group: NOGRP + screen_size: 720p + title: FlexGet + episode_title: Title Here + video_codec: H.264 + +? FlexGet.Series.2013.14.of.21.Title.Here.720p.HDTV.AAC5.1.x264-NOGRP +: audio_channels: '5.1' + audio_codec: AAC + episode_count: 21 + episode: 14 + source: HDTV + release_group: NOGRP + screen_size: 720p + season: 2013 + title: FlexGet Series + episode_title: Title Here + video_codec: H.264 + year: 2013 + +? Something.S04E05E09 +: episode: # 1.x guessit this as a range from 5 to 9. But not sure if it should ... + - 5 + - 9 + season: 4 + title: Something + +? FooBar 360 1080i +: options: --episode-prefer-number + episode: 360 + screen_size: 1080i + title: FooBar + +? FooBar 360 1080i +: episode: 60 + season: 3 + screen_size: 1080i + title: FooBar + +? FooBar 360 +: season: 3 + episode: 60 + title: FooBar + -screen_size: 360p + +? BarFood christmas special HDTV +: options: --expected-title BarFood + source: HDTV + title: BarFood + episode_title: christmas special + episode_details: Special + +? Something.2008x12.13-FlexGet +: title: Something + date: 2008-12-13 + episode_title: FlexGet + +? '[Ignored] Test 12' +: episode: 12 + release_group: Ignored + title: Test + +? '[FlexGet] Test 12' +: episode: 12 + release_group: FlexGet + title: Test + +? Test.13.HDTV-Ignored +: episode: 13 + source: HDTV + release_group: Ignored + title: Test + +? Test.13.HDTV-Ignored +: options: --expected-series test + episode: 13 + source: HDTV + release_group: Ignored + title: Test + +? Test.13.HDTV-Ignored +: title: Test + episode: 13 + source: HDTV + release_group: Ignored + +? Test.13.HDTV-Ignored +: episode: 13 + source: HDTV + release_group: Ignored + title: Test + +? Test.13.HDTV-FlexGet +: episode: 13 + source: HDTV + release_group: FlexGet + title: Test + +? 
Test.14.HDTV-Name +: episode: 14 + source: HDTV + release_group: Name + title: Test + +? Real.Time.With.Bill.Maher.2014.10.31.HDTV.XviD-AFG.avi +: date: 2014-10-31 + source: HDTV + release_group: AFG + title: Real Time With Bill Maher + video_codec: Xvid + +? Arrow.S03E21.Al.Sah-Him.1080p.WEB-DL.DD5.1.H.264-BS.mkv +: title: Arrow + season: 3 + episode: 21 + episode_title: Al Sah-Him + screen_size: 1080p + audio_codec: Dolby Digital + audio_channels: "5.1" + video_codec: H.264 + release_group: BS + source: Web + +? How to Make It in America - S02E06 - I'm Sorry, Who's Yosi?.mkv +: title: How to Make It in America + season: 2 + episode: 6 + episode_title: I'm Sorry, Who's Yosi? + +? 24.S05E07.FRENCH.DVDRip.XviD-FiXi0N.avi +: episode: 7 + source: DVD + other: Rip + language: fr + season: 5 + title: '24' + video_codec: Xvid + release_group: FiXi0N + +? 12.Monkeys.S01E12.FRENCH.BDRip.x264-VENUE.mkv +: episode: 12 + source: Blu-ray + other: Rip + language: fr + release_group: VENUE + season: 1 + title: 12 Monkeys + video_codec: H.264 + +? 90.Day.Fiance.S02E07.I.Have.To.Tell.You.Something.720p.HDTV.x264-W4F +: episode: 7 + source: HDTV + screen_size: 720p + season: 2 + title: 90 Day Fiance + episode_title: I Have To Tell You Something + release_group: W4F + +? Doctor.Who.2005.S04E06.FRENCH.LD.DVDRip.XviD-TRACKS.avi +: episode: 6 + source: DVD + language: fr + release_group: TRACKS + season: 4 + title: Doctor Who + other: [Line Dubbed, Rip] + video_codec: Xvid + year: 2005 + +? Astro.Le.Petit.Robot.S01E01+02.FRENCH.DVDRiP.X264.INT-BOOLZ.mkv +: episode: [1, 2] + source: DVD + other: Rip + language: fr + release_group: INT-BOOLZ + season: 1 + title: Astro Le Petit Robot + video_codec: H.264 + +? Annika.Bengtzon.2012.E01.Le.Testament.De.Nobel.FRENCH.DVDRiP.XViD-STVFRV.avi +: episode: 1 + source: DVD + other: Rip + language: fr + release_group: STVFRV + title: Annika Bengtzon + episode_title: Le Testament De Nobel + video_codec: Xvid + year: 2012 + +? 
Dead.Set.02.FRENCH.LD.DVDRip.XviD-EPZ.avi +: episode: 2 + source: DVD + language: fr + other: [Line Dubbed, Rip] + release_group: EPZ + title: Dead Set + video_codec: Xvid + +? Phineas and Ferb S01E00 & S01E01 & S01E02 +: episode: [0, 1, 2] + season: 1 + title: Phineas and Ferb + +? Show.Name.S01E02.S01E03.HDTV.XViD.Etc-Group +: episode: [2, 3] + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? Show Name - S01E02 - S01E03 - S01E04 - Ep Name +: episode: [2, 3, 4] + season: 1 + title: Show Name + episode_title: Ep Name + +? Show.Name.1x02.1x03.HDTV.XViD.Etc-Group +: episode: [2, 3] + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? Show Name - 1x02 - 1x03 - 1x04 - Ep Name +: episode: [2, 3, 4] + season: 1 + title: Show Name + episode_title: Ep Name + +? Show.Name.S01E02.HDTV.XViD.Etc-Group +: episode: 2 + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? Show Name - S01E02 - My Ep Name +: episode: 2 + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show Name - S01.E03 - My Ep Name +: episode: 3 + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show.Name.S01E02E03.HDTV.XViD.Etc-Group +: episode: [2, 3] + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? Show Name - S01E02-03 - My Ep Name +: episode: [2, 3] + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show.Name.S01.E02.E03 +: episode: [2, 3] + season: 1 + title: Show Name + +? Show_Name.1x02.HDTV_XViD_Etc-Group +: episode: 2 + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? Show Name - 1x02 - My Ep Name +: episode: 2 + season: 1 + title: Show Name + episode_title: My Ep Name + +? Show_Name.1x02x03x04.HDTV_XViD_Etc-Group +: episode: [2, 3, 4] + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? 
Show Name - 1x02-03-04 - My Ep Name +: episode: [2, 3, 4] + season: 1 + title: Show Name + episode_title: My Ep Name + +# 1x guess this as episode 100 but 101 as episode 1 season 1. +? Show.Name.100.Event.2010.11.23.HDTV.XViD.Etc-Group +: date: 2010-11-23 + season: 1 + episode: 0 + source: HDTV + release_group: Etc-Group + title: Show Name + episode_title: Event + video_codec: Xvid + +? Show.Name.101.Event.2010.11.23.HDTV.XViD.Etc-Group +: date: 2010-11-23 + season: 1 + episode: 1 + source: HDTV + release_group: Etc-Group + title: Show Name + episode_title: Event + video_codec: Xvid + +? Show.Name.2010.11.23.HDTV.XViD.Etc-Group +: date: 2010-11-23 + source: HDTV + release_group: Etc-Group + title: Show Name + +? Show Name - 2010-11-23 - Ep Name +: date: 2010-11-23 + title: Show Name + episode_title: Ep Name + +? Show Name Season 1 Episode 2 Ep Name +: episode: 2 + season: 1 + title: Show Name + episode_title: Ep Name + +? Show.Name.S01.HDTV.XViD.Etc-Group +: source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? Show.Name.E02-03 +: episode: [2, 3] + title: Show Name + +? Show.Name.E02.2010 +: episode: 2 + year: 2010 + title: Show Name + +? Show.Name.E23.Test +: episode: 23 + title: Show Name + episode_title: Test + +? Show.Name.Part.3.HDTV.XViD.Etc-Group +: part: 3 + title: Show Name + source: HDTV + video_codec: Xvid + release_group: Etc-Group + type: movie + # Fallback to movie type because we can't tell it's a series ... + +? Show.Name.Part.1.and.Part.2.Blah-Group +: part: [1, 2] + title: Show Name + type: movie + # Fallback to movie type because we can't tell it's a series ... + +? Show Name - 01 - Ep Name +: episode: 1 + title: Show Name + episode_title: Ep Name + +? 01 - Ep Name +: episode: 1 + title: Ep Name + +? Show.Name.102.HDTV.XViD.Etc-Group +: episode: 2 + source: HDTV + release_group: Etc-Group + season: 1 + title: Show Name + video_codec: Xvid + +? 
'[HorribleSubs] Maria the Virgin Witch - 01 [720p].mkv' +: episode: 1 + release_group: HorribleSubs + screen_size: 720p + title: Maria the Virgin Witch + +? '[ISLAND]One_Piece_679_[VOSTFR]_[V1]_[8bit]_[720p]_[EB7838FC].mp4' +: crc32: EB7838FC + episode: 679 + release_group: ISLAND + screen_size: 720p + title: One Piece + subtitle_language: fr + color_depth: 8-bit + version: 1 + +? '[ISLAND]One_Piece_679_[VOSTFR]_[8bit]_[720p]_[EB7838FC].mp4' +: crc32: EB7838FC + episode: 679 + release_group: ISLAND + screen_size: 720p + title: One Piece + subtitle_language: fr + color_depth: 8-bit + +? '[Kaerizaki-Fansub]_One_Piece_679_[VOSTFR][HD_1280x720].mp4' +: episode: 679 + other: HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: One Piece + subtitle_language: fr + +? '[Kaerizaki-Fansub]_One_Piece_679_[VOSTFR][FANSUB][HD_1280x720].mp4' +: episode: 679 + other: [Fan Subtitled, HD] + release_group: Kaerizaki-Fansub + screen_size: 720p + title: One Piece + subtitle_language: fr + +? '[Kaerizaki-Fansub]_One_Piece_681_[VOSTFR][HD_1280x720]_V2.mp4' +: episode: 681 + other: HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: One Piece + subtitle_language: fr + version: 2 + +? '[Kaerizaki-Fansub] High School DxD New 04 VOSTFR HD (1280x720) V2.mp4' +: episode: 4 + other: HD + release_group: Kaerizaki-Fansub + screen_size: 720p + title: High School DxD New + subtitle_language: fr + version: 2 + +? '[Kaerizaki-Fansub] One Piece 603 VOSTFR PS VITA (960x544) V2.mp4' +: episode: 603 + release_group: Kaerizaki-Fansub + other: PS Vita + screen_size: 960x544 + title: One Piece + subtitle_language: fr + version: 2 + +? '[Group Name] Show Name.13' +: episode: 13 + release_group: Group Name + title: Show Name + +? '[Group Name] Show Name - 13' +: episode: 13 + release_group: Group Name + title: Show Name + +? 
'[Group Name] Show Name 13' +: episode: 13 + release_group: Group Name + title: Show Name + +# [Group Name] Show Name.13-14 +# [Group Name] Show Name - 13-14 +# Show Name 13-14 + +? '[Stratos-Subs]_Infinite_Stratos_-_12_(1280x720_H.264_AAC)_[379759DB]' +: audio_codec: AAC + crc32: 379759DB + episode: 12 + release_group: Stratos-Subs + screen_size: 720p + title: Infinite Stratos + video_codec: H.264 + +# [ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC) + +? '[SGKK] Bleach 312v1 [720p/MKV]' +: episode: 312 + release_group: SGKK + screen_size: 720p + title: Bleach + version: 1 + +? '[Ayako]_Infinite_Stratos_-_IS_-_07_[H264][720p][EB7838FC]' +: crc32: EB7838FC + episode: 7 + release_group: Ayako + screen_size: 720p + title: Infinite Stratos + video_codec: H.264 + +? '[Ayako] Infinite Stratos - IS - 07v2 [H264][720p][44419534]' +: crc32: '44419534' + episode: 7 + release_group: Ayako + screen_size: 720p + title: Infinite Stratos + video_codec: H.264 + version: 2 + +? '[Ayako-Shikkaku] Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne - 10 [LQ][h264][720p] [8853B21C]' +: crc32: 8853B21C + episode: 10 + release_group: Ayako-Shikkaku + screen_size: 720p + title: Oniichan no Koto Nanka Zenzen Suki Janain Dakara ne + video_codec: H.264 + +? Bleach - s16e03-04 - 313-314 +? Bleach.s16e03-04.313-314-GROUP +? Bleach s16e03e04 313-314 +: title: Bleach + season: 16 + episode: [3, 4] + absolute_episode: [313, 314] + +? Bleach - 313-314 +: options: -E + episode: [313, 314] + title: Bleach + +? '[ShinBunBu-Subs] Bleach - 02-03 (CX 1280x720 x264 AAC)' +: audio_codec: AAC + episode: [2, 3] + release_group: ShinBunBu-Subs + screen_size: 720p + title: Bleach + video_codec: H.264 + +? 003. Show Name - Ep Name.avi +: episode: 3 + title: Show Name + episode_title: Ep Name + +? 003-004. Show Name - Ep Name.avi +: episode: [3, 4] + title: Show Name + episode_title: Ep Name + +? One Piece - 102 +: episode: 2 + season: 1 + title: One Piece + +? 
"[ACX]_Wolf's_Spirit_001.mkv" +: episode: 1 + release_group: ACX + title: "Wolf's Spirit" + +? Project.Runway.S14E00.and.S14E01.(Eng.Subs).SDTV.x264-[2Maverick].mp4 +: episode: [0, 1] + source: TV + release_group: 2Maverick + season: 14 + title: Project Runway + subtitle_language: en + video_codec: H.264 + +? '[Hatsuyuki-Kaitou]_Fairy_Tail_2_-_16-20_[720p][10bit].torrent' +: episode: [16, 17, 18, 19, 20] + release_group: Hatsuyuki-Kaitou + screen_size: 720p + title: Fairy Tail 2 + color_depth: 10-bit + +? '[Hatsuyuki-Kaitou]_Fairy_Tail_2_-_16-20_(191-195)_[720p][10bit].torrent' +: episode: [16, 17, 18, 19, 20] + absolute_episode: [191, 192, 193, 194, 195] + release_group: Hatsuyuki-Kaitou + screen_size: 720p + title: Fairy Tail 2 + +? "Looney Tunes 1940x01 Porky's Last Stand.mkv" +: episode: 1 + season: 1940 + title: Looney Tunes + episode_title: Porky's Last Stand + year: 1940 + +? The.Good.Wife.S06E01.E10.720p.WEB-DL.DD5.1.H.264-CtrlHD/The.Good.Wife.S06E09.Trust.Issues.720p.WEB-DL.DD5.1.H.264-CtrlHD.mkv +: audio_channels: '5.1' + audio_codec: Dolby Digital + episode: 9 + source: Web + release_group: CtrlHD + screen_size: 720p + season: 6 + title: The Good Wife + episode_title: Trust Issues + video_codec: H.264 + +? Fear the Walking Dead - 01x02 - So Close, Yet So Far.REPACK-KILLERS.French.C.updated.Addic7ed.com.mkv +: episode: 2 + language: fr + other: Proper + proper_count: 1 + season: 1 + title: Fear the Walking Dead + episode_title: So Close, Yet So Far + +? Fear the Walking Dead - 01x02 - En Close, Yet En Far.REPACK-KILLERS.French.C.updated.Addic7ed.com.mkv +: episode: 2 + language: fr + other: Proper + proper_count: 1 + season: 1 + title: Fear the Walking Dead + episode_title: En Close, Yet En Far + +? /av/unsorted/The.Daily.Show.2015.07.22.Jake.Gyllenhaal.720p.HDTV.x264-BATV.mkv +: date: 2015-07-22 + source: HDTV + release_group: BATV + screen_size: 720p + title: The Daily Show + episode_title: Jake Gyllenhaal + video_codec: H.264 + +? 
"[7.1.7.8.5] Foo Bar - 11 (H.264) [5235532D].mkv" +: episode: 11 + +? my 720p show S01E02 +: options: -T "my 720p show" + title: my 720p show + season: 1 + episode: 2 + +? my 720p show S01E02 720p +: options: -T "my 720p show" + title: my 720p show + season: 1 + episode: 2 + screen_size: 720p + +? -my 720p show S01E02 +: options: -T "re:my \d+p show" + screen_size: 720p + +? Show S01E02 +: options: -T "The Show" + title: Show + season: 1 + episode: 2 + +? Foo's & Bars (2009) S01E01 720p XviD-2HD[AOEU] +: episode: 1 + release_group: 2HD[AOEU] + screen_size: 720p + season: 1 + title: Foo's & Bars + video_codec: Xvid + year: 2009 + +? Date.Series.10-11-2008.XViD +: date: 2008-11-10 + title: Date Series + video_codec: Xvid + +? Scrubs/SEASON-06/Scrubs.S06E09.My.Perspective.DVDRip.XviD-WAT/scrubs.s06e09.dvdrip.xvid-wat.avi +: container: avi + episode: 9 + episode_title: My Perspective + source: DVD + other: Rip + release_group: WAT + season: 6 + title: Scrubs + video_codec: Xvid + +? '[PuyaSubs!] Digimon Adventure tri - 01 [720p][F9967949].mkv' +: container: mkv + crc32: F9967949 + episode: 1 + release_group: PuyaSubs! + screen_size: 720p + title: Digimon Adventure tri + +? Sherlock.S01.720p.BluRay.x264-AVCHD +: source: Blu-ray + screen_size: 720p + season: 1 + title: Sherlock + video_codec: H.264 + +? Running.Wild.With.Bear.Grylls.S02E07.Michael.B.Jordan.PROPER.HDTV.x264-W4F.avi +: container: avi + episode: 7 + episode_title: Michael B Jordan + source: HDTV + other: Proper + proper_count: 1 + release_group: W4F + season: 2 + title: Running Wild With Bear Grylls + video_codec: H.264 + +? Homeland.S05E11.Our.Man.in.Damascus.German.Sub.720p.HDTV.x264.iNTERNAL-BaCKToRG +: episode: 11 + episode_title: Our Man in Damascus + source: HDTV + other: Internal + release_group: BaCKToRG + screen_size: 720p + season: 5 + subtitle_language: de + title: Homeland + type: episode + video_codec: H.264 + +? 
Breaking.Bad.S01E01.2008.BluRay.VC1.1080P.5.1.WMV-NOVO +: title: Breaking Bad + season: 1 + episode: 1 + year: 2008 + source: Blu-ray + screen_size: 1080p + audio_channels: '5.1' + container: WMV + release_group: NOVO + type: episode + +? Cosmos.A.Space.Time.Odyssey.S01E02.HDTV.x264.PROPER-LOL +: title: Cosmos A Space Time Odyssey + season: 1 + episode: 2 + source: HDTV + video_codec: H.264 + other: Proper + proper_count: 1 + release_group: LOL + type: episode + +? Fear.The.Walking.Dead.S02E01.HDTV.x264.AAC.MP4-k3n +: title: Fear The Walking Dead + season: 2 + episode: 1 + source: HDTV + video_codec: H.264 + audio_codec: AAC + container: mp4 + release_group: k3n + type: episode + +? Elementary.S01E01.Pilot.DVDSCR.x264.PREAiR-NoGRP +: title: Elementary + season: 1 + episode: 1 + episode_details: Pilot + episode_title: Pilot + source: DVD + video_codec: H.264 + other: [Screener, Preair] + release_group: NoGRP + type: episode + +? Once.Upon.a.Time.S05E19.HDTV.x264.REPACK-LOL[ettv] +: title: Once Upon a Time + season: 5 + episode: 19 + source: HDTV + video_codec: H.264 + other: Proper + proper_count: 1 + release_group: LOL[ettv] + type: episode + +? Show.Name.S01E03.WEB-DL.x264.HUN-nIk +: title: Show Name + season: 1 + episode: 3 + source: Web + video_codec: H.264 + language: hu + release_group: nIk + type: episode + +? Game.of.Thrones.S6.Ep5.X265.Dolby.2.0.KTM3.mp4 +: audio_channels: '2.0' + audio_codec: Dolby Digital + container: mp4 + episode: 5 + release_group: KTM3 + season: 6 + title: Game of Thrones + type: episode + video_codec: H.265 + +? Fargo.-.Season.1.-.720p.BluRay.-.x264.-.ShAaNiG +: source: Blu-ray + release_group: ShAaNiG + screen_size: 720p + season: 1 + title: Fargo + type: episode + video_codec: H.264 + +? 
Show.Name.S02E02.Episode.Title.1080p.WEB-DL.x264.5.1Ch.-.Group +: audio_channels: '5.1' + episode: 2 + episode_title: Episode Title + source: Web + release_group: Group + screen_size: 1080p + season: 2 + title: Show Name + type: episode + video_codec: H.264 + +? Breaking.Bad.S01E01.2008.BluRay.VC1.1080P.5.1.WMV-NOVO +: audio_channels: '5.1' + container: wmv + episode: 1 + source: Blu-ray + release_group: NOVO + screen_size: 1080p + season: 1 + title: Breaking Bad + type: episode + year: 2008 + +? Cosmos.A.Space.Time.Odyssey.S01E02.HDTV.x264.PROPER-LOL +: episode: 2 + source: HDTV + other: Proper + proper_count: 1 + release_group: LOL + season: 1 + title: Cosmos A Space Time Odyssey + type: episode + video_codec: H.264 + +? Elementary.S01E01.Pilot.DVDSCR.x264.PREAiR-NoGRP +: episode: 1 + episode_details: Pilot + episode_title: Pilot + source: DVD + other: + - Screener + - Preair + release_group: NoGRP + season: 1 + title: Elementary + type: episode + video_codec: H.264 + +? Fear.The.Walking.Dead.S02E01.HDTV.x264.AAC.MP4-k3n.mp4 +: audio_codec: AAC + container: mp4 + episode: 1 + source: HDTV + release_group: k3n + season: 2 + title: Fear The Walking Dead + type: episode + video_codec: H.264 + +? Game.of.Thrones.S03.1080p.BluRay.DTS-HD.MA.5.1.AVC.REMUX-FraMeSToR +: audio_channels: '5.1' + audio_codec: DTS-HD + audio_profile: Master Audio + source: Blu-ray + other: Remux + release_group: FraMeSToR + screen_size: 1080p + season: 3 + title: Game of Thrones + type: episode + +? Show.Name.S01E02.HDTV.x264.NL-subs-ABC +: episode: 2 + source: HDTV + release_group: ABC + season: 1 + subtitle_language: nl + title: Show Name + type: episode + video_codec: H.264 + +? Friends.S01-S10.COMPLETE.720p.BluRay.x264-PtM +: source: Blu-ray + other: Complete + release_group: PtM + screen_size: 720p + season: # Should it be [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ? + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + title: Friends + type: episode + video_codec: H.264 + +? 
Duck.Dynasty.S02E07.Streik.German.DOKU.DL.WS.DVDRiP.x264-CDP +: episode: 7 + episode_title: Streik + source: DVD + language: + - German + - Multi + other: [Documentary, Widescreen, Rip] + release_group: CDP + season: 2 + title: Duck Dynasty + type: episode + video_codec: H.264 + +? Family.Guy.S13E14.JOLO.German.AC3D.DL.720p.WebHD.x264-CDD +: audio_codec: Dolby Digital + episode: 14 + episode_title: JOLO + source: Web + language: + - German + - Multi + release_group: CDD + screen_size: 720p + season: 13 + title: Family Guy + type: episode + video_codec: H.264 + +? How.I.Met.Your.Mother.COMPLETE.SERIES.DVDRip.XviD-AR +: options: -L en -C us + source: DVD + other: [Complete, Rip] + release_group: AR + title: How I Met Your Mother + type: movie # Should be episode + video_codec: Xvid + +? Show Name The Complete Seasons 1 to 5 720p BluRay x265 HEVC-SUJAIDR[UTR] +: source: Blu-ray + other: Complete + release_group: SUJAIDR[UTR] + screen_size: 720p + season: + - 1 + - 2 + - 3 + - 4 + - 5 + title: Show Name + type: episode + video_codec: H.265 + +? Fear.the.Walking.Dead.-.Season.2.epi.02.XviD.Eng.Ac3-5.1.sub.ita.eng.iCV-MIRCrew +: options: -t episode + audio_channels: '5.1' + audio_codec: Dolby Digital + episode: 2 + episode_title: epi + language: en + release_group: iCV-MIRCrew + season: 2 + subtitle_language: it + title: Fear the Walking Dead + type: episode + video_codec: Xvid + +? Game.Of.Thrones.S06E04.720p.PROPER.HDTV.x264-HDD +: episode: 4 + source: HDTV + other: Proper + proper_count: 1 + release_group: HDD + screen_size: 720p + season: 6 + title: Game Of Thrones + type: episode + video_codec: H.264 + +? Marvels.Daredevil.S02E04.WEBRip.x264-NF69.mkv +: container: mkv + episode: 4 + source: Web + other: Rip + release_group: NF69 + season: 2 + title: Marvels Daredevil + type: episode + video_codec: H.264 + +? 
The.Walking.Dead.S06E01.FRENCH.1080p.WEB-DL.DD5.1.HEVC.x265-GOLF68 +: audio_channels: '5.1' + audio_codec: Dolby Digital + episode: 1 + source: Web + language: fr + release_group: GOLF68 + screen_size: 1080p + season: 6 + title: The Walking Dead + type: episode + video_codec: H.265 + +? American.Crime.S01E03.FASTSUB.VOSTFR.720p.HDTV.x264-F4ST +: episode: 3 + source: HDTV + other: Fast Subtitled + release_group: F4ST + screen_size: 720p + season: 1 + subtitle_language: fr + title: American Crime + type: episode + video_codec: H.264 + +? Gotham.S02E12.FASTSUB.VOSTFR.HDTV.X264-F4ST3R +: episode: 12 + source: HDTV + other: Fast Subtitled + release_group: F4ST3R + season: 2 + subtitle_language: fr + title: Gotham + type: episode + video_codec: H.264 + +# WEBRip + LD +? Australian.Story.2016.05.23.Into.The.Fog.of.War.Part.1.360p.LDTV.WEBRIP.[MPup] +: title: Australian Story + date: 2016-05-23 + episode_title: Into The Fog of War + part: 1 + screen_size: 360p + other: [Low Definition, Rip] + source: Web + release_group: MPup + type: episode + +# AHDTV +? Show.Name.S04E06.FRENCH.AHDTV.XviD +: title: Show Name + season: 4 + episode: 6 + language: fr + source: Analog HDTV + video_codec: Xvid + type: episode + +# WEBDLRip +? Show.Name.s06e14.WEBDLRip.-qqss44.avi +: title: Show Name + season: 6 + episode: 14 + source: Web + other: Rip + release_group: qqss44 + container: avi + type: episode + +# WEBCap +? Steven.Universe.S03E06.Steven.Floats.720p.WEBCap.x264-SRS +: title: Steven Universe + season: 3 + episode: 6 + episode_title: Steven Floats + screen_size: 720p + source: Web + other: Rip + video_codec: H.264 + release_group: SRS + type: episode + +# DSR +? Show.Name.S05E09.Some.Episode.Title.WS.DSR.x264-[NY2] +: title: Show Name + season: 5 + episode: 9 + episode_title: Some Episode Title + other: Widescreen + source: Satellite + video_codec: H.264 + release_group: NY2 + type: episode + +# DSRip +? 
Squidbillies.S04E05.WS.DSRip.XviD-aAF +: title: Squidbillies + season: 4 + episode: 5 + other: [Widescreen, Rip] + source: Satellite + video_codec: Xvid + release_group: aAF + type: episode + + +? /series/The.B*.B*.T*.S10E01.1080p.HDTV.X264-DIMENSION[rarbg]/The.B*.B*.T*.S10E01.1080p.HDTV.X264-DIMENSION.mkv +: container: mkv + episode: 1 + source: HDTV + release_group: DIMENSION + screen_size: 1080p + season: 10 + title: The B B T + type: episode + video_codec: H.264 + +? '[Y-F] Very long Show Name Here - 03 Vostfr HD 8bits' +: release_group: Y-F + title: Very long Show Name Here + episode: 3 + subtitle_language: fr + other: HD + color_depth: 8-bit + type: episode + +? '[.www.site.com.].-.Snooze.and.Go.Sleep.S03E02.1080p.HEVC.x265-MeGusta' +: episode: 2 + release_group: MeGusta + screen_size: 1080p + season: 3 + title: Snooze and Go Sleep + type: episode + video_codec: H.265 + website: www.site.com + +? Show.Name.S01.720p.HDTV.DD5.1.x264-Group/show.name.0106.720p-group.mkv +: title: Show Name + season: 1 + screen_size: 720p + source: HDTV + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: Group + episode: 6 + container: mkv + type: episode + + +? Coupling Season 1 - 4 Complete DVDRip/Coupling Season 4/Coupling - (4x03) - Bed Time.mkv +: title: Coupling + other: [Complete, Rip] + source: DVD + season: 4 + episode: 3 + episode_title: Bed Time + container: mkv + type: episode + + +? Vice.News.Tonight.2016.10.10.1080p.HBO.WEBRip.AAC2.0.H.264-monkee +: title: Vice News Tonight + date: 2016-10-10 + screen_size: 1080p + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +? frasier.s8e6-768660.srt +: container: srt + episode: 6 + episode_title: '768660' + season: 8 + title: frasier + type: episode + +? 
Show.Name.S03E15.480p.177mb.Proper.HDTV.x264 +: title: Show Name + season: 3 + episode: 15 + screen_size: 480p + size: 177MB + other: Proper + proper_count: 1 + source: HDTV + video_codec: H.264 + type: episode + +? Show.Name.S03E15.480p.4.8GB.Proper.HDTV.x264 +: title: Show Name + season: 3 + episode: 15 + screen_size: 480p + size: 4.8GB + other: Proper + proper_count: 1 + source: HDTV + video_codec: H.264 + type: episode + +? Show.Name.S03.1.1TB.Proper.HDTV.x264 +: title: Show Name + season: 3 + size: 1.1TB + other: Proper + proper_count: 1 + source: HDTV + video_codec: H.264 + type: episode + +? Some.Show.S02E14.1080p.HDTV.X264-reenc.GROUP +? Some.Show.S02E14.1080p.HDTV.X264-re-enc.GROUP +? Some.Show.S02E14.1080p.HDTV.X264-re-encoded.GROUP +? Some.Show.S02E14.1080p.HDTV.X264-reencoded.GROUP +: title: Some Show + season: 2 + episode: 14 + screen_size: 1080p + source: HDTV + video_codec: H.264 + other: Reencoded + release_group: GROUP + type: episode + +# DDP is DD+ +? Show.Name.2016.S01E01.2160p.AMZN.WEBRip.DDP5.1.x264-Group +: title: Show Name + year: 2016 + season: 1 + episode: 1 + screen_size: 2160p + streaming_service: Amazon Prime + source: Web + other: Rip + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + release_group: Group + type: episode + +? Show Name S02e19 [Mux - H264 - Ita Aac] DLMux by UBi +: title: Show Name + season: 2 + episode: 19 + video_codec: H.264 + language: it + audio_codec: AAC + source: Web + other: Mux + release_group: UBi + type: episode + +? Show Name S01e10[Mux - 1080p - H264 - Ita Eng Ac3 - Sub Ita Eng]DLMux By GiuseppeTnT Littlelinx +: title: Show Name + season: 1 + episode: 10 + screen_size: 1080p + video_codec: H.264 + language: [it, en] + source: Web + other: Mux + audio_codec: Dolby Digital + subtitle_language: [it, en] + release_group: GiuseppeTnT Littlelinx + type: episode + +? 
Show Name S04e07-08 [H264 - Ita Aac] HDTVMux by Group +: title: Show Name + season: 4 + episode: [7, 8] + video_codec: H.264 + language: it + audio_codec: AAC + source: HDTV + other: Mux + release_group: Group + type: episode + +? Show Name 3x18 Un Tuffo Nel Passato ITA HDTVMux x264 Group +: title: Show Name + season: 3 + episode: 18 + episode_title: Un Tuffo Nel Passato + language: it + source: HDTV + other: Mux + video_codec: H.264 + release_group: Group + type: episode + +? Show.Name.S03.1080p.BlurayMUX.AVC.DTS-HD.MA +: title: Show Name + season: 3 + screen_size: 1080p + source: Blu-ray + other: Mux + video_codec: H.264 + audio_codec: DTS-HD + audio_profile: Master Audio + type: episode + +? Show.Name.-.07.(2016).[RH].[English.Dubbed][WEBRip]..[HD.1080p] +: options: -t episode + episode: 7 + source: Web + other: Rip + language: en + other: [HD, Rip] + screen_size: 1080p + title: Show Name + type: episode + year: 2016 + +? Show.Name.-.476-479.(2007).[HorribleSubs][WEBRip]..[HD.720p] +: options: -t episode + episode: + - 476 + - 477 + - 478 + - 479 + source: Web + other: [Rip, HD] + release_group: HorribleSubs + screen_size: 720p + title: Show Name + type: episode + year: 2007 + +? /11.22.63/Season 1/11.22.63.106.hdtv-abc +: options: -T 11.22.63 + title: 11.22.63 + season: 1 + episode: 6 + source: HDTV + release_group: abc + type: episode + +? Proof.2015.S01E10.1080p.WEB-DL.DD5.1.H.264-KINGS.mkv +: title: Proof + season: 1 + episode: 10 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: KINGS + container: mkv + type: episode + +# Hardcoded subtitles +? Show.Name.S06E16.HC.SWESUB.HDTV.x264 +: title: Show Name + season: 6 + episode: 16 + other: Hardcoded Subtitles + source: HDTV + video_codec: H.264 + subtitle_language: sv + type: episode + +? 
From [ WWW.TORRENTING.COM ] - White.Rabbit.Project.S01E08.1080p.NF.WEBRip.DD5.1.x264-ViSUM/White.Rabbit.Project.S01E08.1080p.NF.WEBRip.DD5.1.x264-ViSUM.mkv +: title: White Rabbit Project + website: WWW.TORRENTING.COM + season: 1 + episode: 8 + screen_size: 1080p + streaming_service: Netflix + source: Web + other: Rip + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: ViSUM + container: mkv + type: episode + +? /tv/Daniel Tiger's Neighborhood/S02E06 - Playtime Is Different.mp4 +: season: 2 + episode: 6 + title: Daniel Tiger's Neighborhood + episode_title: Playtime Is Different + container: mp4 + type: episode + +? Zoo.S02E05.1080p.WEB-DL.DD5.1.H.264.HKD/160725_02.mkv +: title: Zoo + season: 2 + episode: 5 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: HKD + container: mkv + type: episode + +? We.Bare.Bears.S01E14.Brother.Up.1080p.WEB-DL.AAC2.0.H.264-TVSmash/mxNMuJWeO7PUWCMEwqKSsS6D8Vs9S6V3PHD.mkv +: title: We Bare Bears + season: 1 + episode: 14 + episode_title: Brother Up + screen_size: 1080p + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: TVSmash + container: mkv + type: episode + +? Beyond.S01E02.Tempus.Fugit.720p.FREE.WEBRip.AAC2.0.x264-BTW/gNWDXow11s7E0X7GTDrZ.mkv +: title: Beyond + season: 1 + episode: 2 + episode_title: Tempus Fugit + screen_size: 720p + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTW + container: mkv + type: episode + +? Bones.S12E02.The.Brain.In.The.Bot.1080p.WEB-DL.DD5.1.H.264-R2D2/161219_06.mkv +: title: Bones + season: 12 + episode: 2 + episode_title: The Brain In The Bot + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: R2D2 + container: mkv + type: episode + +? 
The.Messengers.2015.S01E07.1080p.WEB-DL.DD5.1.H264.Nlsubs-Q/QoQ-sbuSLN.462.H.1.5DD.LD-BEW.p0801.70E10S.5102.sregnesseM.ehT.mkv +: title: The Messengers + year: 2015 + season: 1 + episode: 7 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + subtitle_language: nl + release_group: Q + container: mkv + type: episode + +? /Finding.Carter.S02E01.Love.the.Way.You.Lie.1080p.WEB-DL.AAC2.0.H.264-NL/LN-462.H.0.2CAA.LD-BEW.p0801.eiL.uoY.yaW.eht.evoL.10E20S.retraC.gnidniF.mkv +: title: Finding Carter + season: 2 + episode: 1 + episode_title: Love the Way You Lie + screen_size: 1080p + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: NL + container: mkv + type: episode + +? Mr.Robot.S02E12.1080p.WEB-DL.DD5.1-NL.Subs-Het.Robot.Team.OYM/sbuS LN-1.5DD LD-BEW p0801 21E20S toboR .rM.mkv +: title: Mr Robot + season: 2 + episode: 12 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + release_group: Het.Robot.Team.OYM + type: episode + +? Show.Name.-.Temporada.1.720p.HDTV.x264[Cap.102]SPANISH.AUDIO-NEWPCT +? /Show Name/Season 01/Show.Name.-.Temporada.1.720p.HDTV.x264[Cap.102]SPANISH.AUDIO-NEWPCT +? /Show Name/Temporada 01/Show.Name.-.Temporada.1.720p.HDTV.x264[Cap.102]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 1 + episode: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + type: episode + +# newpct +? Show Name - Temporada 4 [HDTV][Cap.408][Espanol Castellano] +? Show Name - Temporada 4 [HDTV][Cap.408][Español Castellano] +: title: Show Name + season: 4 + episode: 8 + source: HDTV + language: ca + type: episode + +# newpct +? -Show Name - Temporada 4 [HDTV][Cap.408][Espanol Castellano] +? -Show Name - Temporada 4 [HDTV][Cap.408][Español Castellano] +: release_group: Castellano + +# newpct +? 
Show.Name.-.Temporada1.[HDTV][Cap.105][Español.Castellano] +: title: Show Name + source: HDTV + season: 1 + episode: 5 + language: ca + type: episode + +# newpct +? Show.Name.-.Temporada1.[HDTV][Cap.105][Español] +: title: Show Name + source: HDTV + season: 1 + episode: 5 + language: es + type: episode + +# newpct - season and episode with range: +? Show.Name.-.Temporada.1.720p.HDTV.x264[Cap.102_104]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 1 + episode: [2, 3, 4] + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + type: episode + +# newpct - season and episode (2 digit season) +? Show.Name.-.Temporada.15.720p.HDTV.x264[Cap.1503]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 15 + episode: 3 + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + type: episode + +# newpct - season and episode (2 digit season with range) +? Show.Name.-.Temporada.15.720p.HDTV.x264[Cap.1503_1506]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 15 + episode: [3, 4, 5, 6] + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + type: episode + +# newpct - season and episode: +? Show.Name.-.Temp.1.720p.HDTV.x264[Cap.102]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 1 + episode: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + type: episode + +# newpct - season and episode: +? Show.Name.-.Tem.1.720p.HDTV.x264[Cap.102]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 1 + episode: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + type: episode + +# newpct - season and episode: +? Show.Name.-.Tem.1.720p.HDTV.x264[Cap.112_114.Final]SPANISH.AUDIO-NEWPCT +: title: Show Name + season: 1 + episode: [12, 13, 14] + screen_size: 720p + source: HDTV + video_codec: H.264 + language: es + release_group: NEWPCT + episode_details: Final + type: episode + +? 
Mastercook Italia - Stagione 6 (2016) 720p ep13 spyro.mkv +: title: Mastercook Italia + season: 6 + episode: 13 + year: 2016 + screen_size: 720p + episode_title: spyro + container: mkv + type: episode + +? Mastercook Italia - Stagione 6 (2016) 720p Episodio 13 spyro.mkv +: title: Mastercook Italia + season: 6 + year: 2016 + screen_size: 720p + episode: 13 + episode_title: spyro + container: mkv + type: episode + +# Italian releases +? Show Name 3x18 Un Tuffo Nel Passato ITA HDTVMux x264 NovaRip +: title: Show Name + season: 3 + episode: 18 + episode_title: Un Tuffo Nel Passato + language: it + source: HDTV + other: Mux + video_codec: H.264 + release_group: NovaRip + type: episode + +# Italian releases +? Show Name 3x18 Un Tuffo Nel Passato ITA HDTVMux x264 NovaRip +: title: Show Name + season: 3 + episode: 18 + episode_title: Un Tuffo Nel Passato + language: it + source: HDTV + other: Mux + video_codec: H.264 + release_group: NovaRip + type: episode + +# Subbed: No language hint +? Show.Name.S06E03.1080p.HDTV.Legendado +: subtitle_language: und + +# Subbed: No language hint +? Show.Name.S01E09.Subbed.1080p.BluRay.x264-RRH +: title: Show Name + season: 1 + episode: 9 + subtitle_language: und + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: RRH + type: episode + +# Legendado PT-BR +? Show.Name.S06E05.1080p.WEBRip.Legendado.PT-BR +? Show.Name.S06E05.1080p.WEBRip.Legendas.PT-BR +? Show.Name.S06E05.1080p.WEBRip.Legenda.PT-BR +: title: Show Name + season: 6 + episode: 5 + screen_size: 1080p + source: Web + other: Rip + subtitle_language: pt-BR + type: episode + +? Show.Name.S01E07.Super, Title.WEB-DL 720p.br.srt +: title: Show Name + season: 1 + episode: 7 + episode_title: Super, Title + source: Web + screen_size: 720p + subtitle_language: pt-BR + container: srt + type: episode + +? -Show.Name.S01E07.Super, Title.WEB-DL 720p.br.srt +: language: pt-BR + +# Legendado PT +? 
Show.Name.S06E05.1080p.WEBRip.Legendado.PT +: title: Show Name + season: 6 + episode: 5 + screen_size: 1080p + source: Web + other: Rip + subtitle_language: pt + type: episode + +? Show.Name.S05E01.SPANISH.SUBBED.720p.HDTV.x264-sPHD +: title: Show Name + season: 5 + episode: 1 + subtitle_language: spa + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: sPHD + type: episode + +? Show.Name.S01E01.German.Subbed.HDTV.XviD-ASAP +: title: Show Name + season: 1 + episode: 1 + subtitle_language: deu + source: HDTV + video_codec: Xvid + release_group: ASAP + type: episode + +? Show.Name.S04E21.Aint.Nothing.Like.the.Real.Thing.German.Custom.Subbed.720p.HDTV.x264.iNTERNAL-BaCKToRG +: title: Show Name + season: 4 + episode: 21 + episode_title: Aint Nothing Like the Real Thing + subtitle_language: deu + screen_size: 720p + source: HDTV + video_codec: H.264 + type: episode + +? Show.Name.S01.Season.Complet.WEBRiP.Ro.Subbed.TM +: title: Show Name + season: 1 + other: [Complete, Rip] + source: Web + subtitle_language: ro + type: episode + +? Show.Name.(2013).Season.3.-.Eng.Soft.Subtitles.720p.WEBRip.x264.[MKV,AC3,5.1].Ehhhh +: title: Show Name + year: 2013 + season: 3 + subtitle_language: en + screen_size: 720p + source: Web + other: Rip + video_codec: H.264 + container: mkv + audio_codec: Dolby Digital + audio_channels: '5.1' + release_group: Ehhhh + type: episode + +# Dublado +? Show.Name.S02E03.720p.HDTV.x264-Belex.-.Dual.Audio.-.Dublado +: title: Show Name + season: 2 + episode: 3 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: Belex + other: Dual Audio + language: und + type: episode + +? Show.Name.S06E10.1080p.WEB-DL.DUAL.[Dublado].RK +: title: Show Name + season: 6 + episode: 10 + screen_size: 1080p + source: Web + other: Dual Audio + language: und + release_group: RK + type: episode + +? 
Show.Name.S06E12.720p.WEB-DL.Dual.Audio.Dublado +: title: Show Name + season: 6 + episode: 12 + screen_size: 720p + source: Web + other: Dual Audio + language: und + type: episode + +? Show.Name.S05E07.720p.DUBLADO.HDTV.x264-0SEC-pia.mkv +: title: Show Name + season: 5 + episode: 7 + screen_size: 720p + language: und + source: HDTV + video_codec: H.264 + release_group: 0SEC-pia + container: mkv + type: episode + +? Show.Name.S02E07.Shiva.AC3.Dubbed.WEBRip.x264 +: title: Show Name + season: 2 + episode: 7 + episode_title: Shiva + audio_codec: Dolby Digital + language: und + source: Web + other: Rip + video_codec: H.264 + type: episode + +# Legendas +? Show.Name.S05.1080p.BluRay.x264-Belex.-.Dual.Audio.+.Legendas +: title: Show Name + season: 5 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: Belex + other: Dual Audio + subtitle_language: und + type: episode + +# Legendas +? Show.Name.S05.1080p.BluRay.x264-Belex.-.Dual.Audio.+.Legendas +: title: Show Name + season: 5 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: Belex + other: Dual Audio + subtitle_language: und + type: episode + +# Subtitulado +? Show.Name.S01E03.HDTV.Subtitulado.Esp.SC +? Show.Name.S01E03.HDTV.Subtitulado.Espanol.SC +? Show.Name.S01E03.HDTV.Subtitulado.Español.SC +: title: Show Name + season: 1 + episode: 3 + source: HDTV + subtitle_language: es + release_group: SC + type: episode + +# Subtitles/Subbed +? Show.Name.S02E08.720p.WEB-DL.Subtitles +? Show.Name.S02E08.Subbed.720p.WEB-DL +: title: Show Name + season: 2 + episode: 8 + screen_size: 720p + source: Web + subtitle_language: und + type: episode + +# Dubbed +? Show.Name.s01e01.german.Dubbed +: title: Show Name + season: 1 + episode: 1 + language: de + type: episode + +? Show.Name.S06E05.Das.Toor.German.AC3.Dubbed.HDTV.German +: title: Show Name + season: 6 + episode: 5 + language: de + audio_codec: Dolby Digital + source: HDTV + type: episode + +? 
Show.Name.S01E01.Savage.Season.GERMAN.DUBBED.WS.HDTVRip.x264-TVP +: title: Show Name + season: 1 + episode: 1 + episode_title: Savage Season + language: de + other: [Widescreen, Rip] + source: HDTV + video_codec: H.264 + release_group: TVP + type: episode + +# Dubbed +? "[AnimeRG].Show.Name.-.03.[Eng.Dubbed].[720p].[WEB-DL].[JRR]" +: title: Show Name + episode: 3 + language: en + screen_size: 720p + source: Web + release_group: JRR + type: episode + +# Dubbed +? "[RH].Show.Name.-.03.[English.Dubbed].[1080p]" +: title: Show Name + episode: 3 + language: en + screen_size: 1080p + release_group: RH + type: episode + +# Hebsubs +? Show.Name.S05E05.HDTV.XviD-AFG.HebSubs +: title: Show Name + season: 5 + episode: 5 + source: HDTV + video_codec: Xvid + release_group: AFG + subtitle_language: he + type: episode + +? Show Name - S02E31 - Episode 55 (720p.HDTV) +: title: Show Name + season: 2 + episode: 31 + episode_title: Episode 55 + screen_size: 720p + source: HDTV + type: episode + +# Scenario: Removing invalid season and episode matches. Correct episode_title match +? Show.Name.S02E06.eps2.4.m4ster-s1ave.aes.1080p.AMZN.WEBRip.DD5.1.x264-GROUP +: title: Show Name + season: 2 + episode: 6 + episode_title: eps2 4 m4ster-s1ave aes + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + other: Rip + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: GROUP + type: episode + +? Show.Name.S01E05.3xpl0its.wmv.720p.WEBdl.EN-SUB.x264-[MULVAcoded].mkv +: title: Show Name + season: 1 + episode: 5 + episode_title: 3xpl0its + screen_size: 720p + source: Web + subtitle_language: en + video_codec: H.264 + type: episode + +# Regression: S4L release group detected as season 4 +# https://github.com/guessit-io/guessit/issues/352 +? Show Name S01E06 DVD-RIP x264-S4L +: title: Show Name + season: 1 + episode: 6 + source: DVD + video_codec: H.264 + release_group: S4L + type: episode + +# Corner case with only date and 720p +? 
The.Show.Name.2016.05.18.720.HDTV.x264-GROUP.VTV +: title: The Show Name + date: 2016-05-18 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: GROUP.VTV + type: episode + +# Corner case with only date and 720p +? -The.Show.Name.2016.05.18.720.HDTV.x264-GROUP.VTV +: season: 7 + episode: 20 + +# https://github.com/guessit-io/guessit/issues/308 (conflict with screen size) +? "[SuperGroup].Show.Name.-.06.[720.Hi10p][1F5578AC]" +: title: Show Name + episode: 6 + screen_size: 720p + color_depth: 10-bit + crc32: 1F5578AC + release_group: SuperGroup + type: episode + +# https://github.com/guessit-io/guessit/issues/308 (conflict with screen size) +? "[SuperGroup].Show.Name.-.06.[1080.Hi10p][1F5578AC]" +: title: Show Name + episode: 6 + screen_size: 1080p + color_depth: 10-bit + crc32: 1F5578AC + release_group: SuperGroup + type: episode + +? "[MK-Pn8].Dimension.W.-.05.[720p][Hi10][Dual][TV-Dub][EDA6E7F1]" +: options: -C us -L und + release_group: MK-Pn8 + title: Dimension W + episode: 5 + screen_size: 720p + color_depth: 10-bit + other: Dual Audio + source: TV + language: und + crc32: EDA6E7F1 + type: episode + +? "[Zero-Raws].Show.Name.493-498.&.500-507.(CX.1280x720.VFR.x264.AAC)" +: release_group: Zero-Raws + title: Show Name + episode: [493, 494, 495, 496, 497, 498, 500, 501, 502, 503, 504, 505, 506, 507] + screen_size: 720p + other: Variable Frame Rate + video_codec: H.264 + audio_codec: AAC + type: episode + +# NetflixUHD +? Show.Name.S01E06.NetflixUHD +: title: Show Name + season: 1 + episode: 6 + streaming_service: Netflix + other: Ultra HD + type: episode + +? Show.Name.S04E13.FINAL.MULTI.DD51.2160p.NetflixUHDRip.x265-TVS +: title: Show Name + season: 4 + episode: 13 + episode_details: Final + language: mul + audio_codec: Dolby Digital + audio_channels: '5.1' + screen_size: 2160p + streaming_service: Netflix + source: Ultra HDTV + other: Rip + video_codec: H.265 + release_group: TVS + type: episode + +? 
Show.Name.S06E11.Of.Late.I.Think.of.Rosewood.iTunesHD.x264 +: title: Show Name + season: 6 + episode: 11 + episode_title: Of Late I Think of Rosewood + streaming_service: iTunes + other: HD + video_codec: H.264 + type: episode + +? Show.Name.S01.720p.iTunes.h264-Group +: title: Show Name + season: 1 + screen_size: 720p + streaming_service: iTunes + video_codec: H.264 + release_group: Group + type: episode + +? Show.Name.1x01.eps1.0.hellofriend.(HDiTunes.Ac3.Esp).(2015).By.Malaguita.avi +: title: Show Name + season: 1 + episode: 1 + episode_title: eps1 0 hellofriend + other: HD + streaming_service: iTunes + audio_codec: Dolby Digital + language: spa + year: 2015 + container: avi + type: episode + +? "[Hanamaru&LoliHouse] The Dragon Dentist - 01 [WebRip 1920x1080 HEVC-yuv420p10 AAC].mkv" +: release_group: Hanamaru&LoliHouse + title: The Dragon Dentist + episode: 1 + source: Web + other: Rip + screen_size: 1080p + video_codec: H.265 + color_depth: 10-bit + audio_codec: AAC + container: mkv + type: episode + +? Show Name - Season 1 Episode 50 +: title: Show Name + season: 1 + episode: 50 + type: episode + +? Vikings.Seizoen.4.1080p.Web.NLsubs +: title: Vikings + season: 4 + screen_size: 1080p + source: Web + subtitle_language: nl + type: episode + +? Star.Wars.Rebels.S01E01.Spark.of.Rebellion.ALTERNATE.CUT.HDTV.x264-W4F.mp4 +: title: Star Wars Rebels + season: 1 + episode: 1 + episode_title: Spark of Rebellion + edition: Alternative Cut + source: HDTV + video_codec: H.264 + release_group: W4F + container: mp4 + type: episode + +? DCs.Legends.of.Tomorrow.S02E12.HDTV.XviD-FUM +: title: DCs Legends of Tomorrow + season: 2 + episode: 12 + source: HDTV + video_codec: Xvid + release_group: FUM + type: episode + +? DC's Legends of Tomorrow 2016 - S02E02 +: title: DC's Legends of Tomorrow + year: 2016 + season: 2 + episode: 2 + type: episode + +? 
Broadchurch.S01.DIRFIX.720p.BluRay.x264-SHORTBREHD +: title: Broadchurch + season: 1 + other: Fix + screen_size: 720p + source: Blu-ray + video_codec: H.264 + release_group: SHORTBREHD + -proper_count: 1 + type: episode + +? Simply Red - 2016-07-08 Montreux Jazz Festival 720p +: title: Simply Red + date: 2016-07-08 + episode_title: Montreux Jazz Festival + screen_size: 720p + type: episode + +? Ridiculousness.S07E14.iNTERNAL.HDTV.x264-YesTV +: title: Ridiculousness + season: 7 + episode: 14 + other: Internal + source: HDTV + video_codec: H.264 + release_group: YesTV + type: episode + +? Stephen.Colbert.2016.05.25.James.McAvoy.iNTERNAL.XviD-AFG +: title: Stephen Colbert + date: 2016-05-25 + episode_title: James McAvoy + other: Internal + video_codec: Xvid + release_group: AFG + type: episode + +? The.100.S01E13.iNTERNAL.READNFO.720p.HDTV.x264-2HD +: title: The 100 + season: 1 + episode: 13 + other: [Internal, Read NFO] + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: 2HD + type: episode + +? The.100.S01E13.READ.NFO.720p.HDTV.x264-2HD +: title: The 100 + season: 1 + episode: 13 + other: Read NFO + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: 2HD + type: episode + +? Dr.Ken.S01E21.SAMPLEFIX.720p.HDTV.x264-SVA +: title: Dr Ken + season: 1 + episode: 21 + other: Fix + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: SVA + type: episode + +? Rick and Morty Season 1 [UNCENSORED] [BDRip] [1080p] [HEVC] +: title: Rick and Morty + season: 1 + edition: Uncensored + other: Rip + source: Blu-ray + screen_size: 1080p + video_codec: H.265 + type: episode + +? 12.Monkeys.S01E01.LiMiTED.FRENCH.1080p.WEB-DL.H264-AUTHORiTY +: title: 12 Monkeys + season: 1 + episode: 1 + edition: Limited + language: french + screen_size: 1080p + source: Web + video_codec: H.264 + release_group: AUTHORiTY + type: episode + +? 
Undateable.2014.S03E05.West.Feed.HDTV.x264-2HD +: title: Undateable + year: 2014 + season: 3 + episode: 5 + other: West Coast Feed + source: HDTV + video_codec: H.264 + release_group: 2HD + type: episode + +? Undateable.2014.S02E07-E08.Live.Episode.West.Coast.Feed.HDTV.x264-2HD +: title: Undateable + year: 2014 + season: 2 + episode: [7, 8] + other: West Coast Feed + source: HDTV + video_codec: H.264 + release_group: 2HD + type: episode + +? Undateable.S03E01-E02.LIVE.EAST.FEED.720p.HDTV.x264-KILLERS +: title: Undateable + season: 3 + episode: [1, 2] + other: East Coast Feed + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: KILLERS + type: episode + +? Undateable.2014.S02E07.Live.Episode.East.Coast.Feed.HDTV.x264-2HD +: title: Undateable + year: 2014 + season: 2 + episode: 7 + other: East Coast Feed + source: HDTV + video_codec: H.264 + release_group: 2HD + type: episode + +? Undateable.2014.S02E07.East.Coast.Feed.720p.WEB-DL.DD5.1.H.264-NTb +: title: Undateable + year: 2014 + season: 2 + episode: 7 + other: East Coast Feed + screen_size: 720p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: NTb + type: episode + +? True Detective S02E04 720p HDTV x264-0SEC [GloDLS].mkv +: title: True Detective + season: 2 + episode: 4 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: 0SEC [GloDLS] + container: mkv + type: episode + +? Anthony.Bourdain.Parts.Unknown.S09E01.Los.Angeles.720p.HDTV.x264-MiNDTHEGAP +: title: Anthony Bourdain Parts Unknown + season: 9 + episode: 1 + episode_title: Los Angeles + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: MiNDTHEGAP + type: episode + +? -feud.s01e05.and.the.winner.is.(the.oscars.of.1963).720p.amzn.webrip.dd5.1.x264-casstudio.mkv +: year: 1963 + +? 
feud.s01e05.and.the.winner.is.(the.oscars.of.1963).720p.amzn.webrip.dd5.1.x264-casstudio.mkv +: title: feud + season: 1 + episode: 5 + episode_title: and the winner is + screen_size: 720p + streaming_service: Amazon Prime + source: Web + other: Rip + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: casstudio + container: mkv + type: episode + +? Adventure.Time.S08E16.Elements.Part.1.Skyhooks.720p.WEB-DL.AAC2.0.H.264-RTN.mkv +: title: Adventure Time + season: 8 + episode: 16 + episode_title: Elements Part 1 Skyhooks + screen_size: 720p + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: RTN + container: mkv + type: episode + +? D:\TV\SITCOMS (CLASSIC)\That '70s Show\Season 07\That '70s Show - S07E22 - 2000 Light Years from Home.mkv +: title: That '70s Show + season: 7 + episode: 22 + episode_title: 2000 Light Years from Home + container: mkv + type: episode + +? Show.Name.S02E01.Super.Title.720p.WEB-DL.DD5.1.H.264-ABC.nzb +: title: Show Name + season: 2 + episode: 1 + episode_title: Super Title + screen_size: 720p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: ABC + container: nzb + type: episode + +? "[SGKK] Bleach 312v1 [720p/mkv]-Group.mkv" +: title: Bleach + episode: 312 + version: 1 + screen_size: 720p + release_group: Group + container: mkv + type: episode + +? The.Expanse.S02E08.720p.WEBRip.x264.EAC3-KiNGS.mkv +: title: The Expanse + season: 2 + episode: 8 + screen_size: 720p + source: Web + other: Rip + video_codec: H.264 + audio_codec: Dolby Digital Plus + release_group: KiNGS + container: mkv + type: episode + +? Series_name.2005.211.episode.title.avi +: title: Series name + year: 2005 + season: 2 + episode: 11 + episode_title: episode title + container: avi + type: episode + +? 
the.flash.2014.208.hdtv-lol[ettv].mkv +: title: the flash + year: 2014 + season: 2 + episode: 8 + source: HDTV + release_group: lol[ettv] + container: mkv + type: episode + +? "[Despair-Paradise].Kono.Subarashii.Sekai.ni.Shukufuku.wo!.2.-..09.vostfr.FHD" +: release_group: Despair-Paradise + title: Kono Subarashii Sekai ni Shukufuku wo! 2 + episode: 9 + subtitle_language: fr + other: Full HD + type: episode + +? Whose Line is it anyway/Season 01/Whose.Line.is.it.Anyway.US.S13E01.720p.WEB.x264-TBS.mkv +: title: Whose Line is it Anyway + season: 13 + episode: 1 + country: US + screen_size: 720p + source: Web + video_codec: H.264 + release_group: TBS + container: mkv + type: episode + +? Planet.Earth.II.S01.2160p.UHD.BluRay.HDR.DTS-HD.MA5.1.x265-ULTRAHDCLUB +: title: Planet Earth II + season: 1 + screen_size: 2160p + source: Ultra HD Blu-ray + other: HDR10 + audio_codec: DTS-HD + audio_profile: Master Audio + audio_channels: '5.1' + video_codec: H.265 + release_group: ULTRAHDCLUB + type: episode + +? Reizen.Waes.S03.FLEMISH.1080p.HDTV.MP2.H.264-NOGRP/Reizen.Waes.S03E05.China.PART1.FLEMISH.1080p.HDTV.MP2.H.264-NOGRP.mkv +: title: Reizen Waes + season: 3 + episode: 5 + part: 1 + language: nl-BE + screen_size: 1080p + source: HDTV + video_codec: H.264 + audio_codec: MP2 + release_group: NOGRP + container: mkv + type: episode + +? "/folder/Marvels.Agent.Carter.S02E05.The.Atomic.Job.1080p.WEB-DL.DD5.1.H264-Coo7[rartv]/Marvel's.Agent.Carter.S02E05.The.Atomic.Job.1080p.WEB-DL.DD5.1.H.264-Coo7.mkv" +: title: Marvel's Agent Carter + season: 2 + episode: 5 + episode_title: The Atomic Job + release_group: Coo7 + type: episode + +? My.Name.Is.Earl.S01-S04.DVDRip.XviD-AR +: title: My Name Is Earl + season: [1, 2, 3, 4] + source: DVD + other: Rip + video_codec: Xvid + release_group: AR + type: episode + +? 
American.Dad.S01E01.Pilot.DVDRip.x264-CS +: title: American Dad + season: 1 + episode: 1 + episode_details: Pilot + source: DVD + other: Rip + video_codec: H.264 + release_group: CS + type: episode + +? Black.Sails.S01E01.HDTV.XviD.HebSubs-DR +: title: Black Sails + season: 1 + episode: 1 + source: HDTV + video_codec: Xvid + subtitle_language: he + release_group: DR + type: episode + +? The.West.Wing.S04E06.Game.On.720p.WEB-DL.AAC2.0.H.264-MC +: title: The West Wing + season: 4 + episode: 6 + episode_title: Game On + screen_size: 720p + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: MC + type: episode + +? 12.Monkeys.S02E05.1080p.WEB-DL.DD5.1.H.264-NA +: title: 12 Monkeys + season: 2 + episode: 5 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: NA + type: episode + +? Fear.the.Walking.Dead.S03E07.1080p.AMZN.WEBRip.DD5.1.x264-VLAD[rarbg]/Fear.the.Walking.Dead.S03E07.1080p.AMZN.WEB-DL.DD+5.1.H.264-VLAD.mkv +: title: Fear the Walking Dead + season: 3 + episode: 7 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + release_group: VLAD + container: mkv + type: episode + +? American.Crime.S01E02.1080p.WEB-DL.DD5.1.H.264-NL +: title: American Crime + season: 1 + episode: 2 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: NL + type: episode + +? Better.Call.Saul.S02.720p.HDTV.x264-TL +: title: Better Call Saul + season: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: TL + type: episode + +? 60.Minutes.2008.12.14.HDTV.XviD-YT +: options: -T '60 Minutes' + title: 60 Minutes + date: 2008-12-14 + source: HDTV + video_codec: Xvid + release_group: YT + type: episode + +? Storm.Chasers.Season.1 +: title: Storm Chasers + season: 1 + type: episode + +? 
Faking.It.2014.S03E08.720p.HDTV.x264-AVS +: title: Faking It + year: 2014 + season: 3 + episode: 8 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: AVS + type: episode + +? /series/Marvel's Agents of S.H.I.E.L.D/Season 4/Marvels.Agents.of.S.H.I.E.L.D.S04E01.The.Ghost.1080p.WEB-DL.DD5.1.H.264-AG.mkv +: title: Marvels Agents of S.H.I.E.L.D. + season: 4 + episode: 1 + episode_title: The Ghost + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: AG + container: mkv + type: episode + +? "[FASubs & TTF] Inuyasha - 099 [DVD] [B15AA1AC].mkv" +: release_group: FASubs & TTF + title: Inuyasha + episode: 99 + source: DVD + crc32: B15AA1AC + container: mkv + type: episode + +? Show.Name.S01E03.PL.SUBBED.480p.WEBRiP.x264 +: title: Show Name + season: 1 + episode: 3 + subtitle_language: pl + screen_size: 480p + source: Web + other: Rip + video_codec: H.264 + type: episode + +? Show.Name.s10e15(233).480p.BDRip-AVC.Ukr.hurtom +: title: Show Name + season: 10 + episode: 15 + screen_size: 480p + source: Blu-ray + other: Rip + video_codec: H.264 + language: uk + release_group: hurtom + type: episode + +? Goof.Troop.1x24.Waste.Makes.Haste.720p.HDTV.x264.CZ-SDTV +: title: Goof Troop + season: 1 + episode: 24 + episode_title: Waste Makes Haste + screen_size: 720p + source: HDTV + video_codec: H.264 + language: cs + release_group: SDTV + type: episode + +? Marvels.Daredevil.S02E11.German.DL.DUBBED.2160p.WebUHD.x264-UHDTV +: title: Marvels Daredevil + season: 2 + episode: 11 + language: [de, mul] + screen_size: 2160p + source: Web + video_codec: H.264 + release_group: UHDTV + type: episode + +? 
BBC The Story of China 1 of 6 - Ancestors CC HDTV x264 AC3 2.0 720p mkv +: title: BBC The Story of China + episode: 1 + episode_count: 6 + episode_title: Ancestors + source: HDTV + video_codec: H.264 + audio_codec: Dolby Digital + audio_channels: '2.0' + screen_size: 720p + container: mkv + type: episode + +? Duck.Dynasty.S09E04.Drone.Survivor.720p.AE.WEBRip.AAC2.0.H264-BTW[rartv] +: title: Duck Dynasty + season: 9 + episode: 4 + episode_title: Drone Survivor + screen_size: 720p + streaming_service: A&E + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTW[rartv] + type: episode + +? Mr.Selfridge.S04E03.720p.WEB-DL.AAC2.0.H264-MS[rartv] +: title: Mr Selfridge + season: 4 + episode: 3 + screen_size: 720p + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: MS[rartv] + type: episode + +? Second.Chance.S01E02.One.More.Notch.1080p.WEB-DL.DD5.1.H264-SC[rartv] +: title: Second Chance + season: 1 + episode: 2 + episode_title: One More Notch + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: rartv + type: episode + +? Total.Divas.S05E01.720p.HDTV.AAC2.0.H.264-SC-SDH +: title: Total Divas + season: 5 + episode: 1 + screen_size: 720p + source: HDTV + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + video_profile: Scalable Video Coding + release_group: SDH + type: episode + +? Marvel's Jessica Jones (2015) s01e09 - AKA Sin Bin.mkv +: title: Marvel's Jessica Jones + season: 1 + episode: 9 + episode_title: AKA Sin Bin + container: mkv + type: episode + +? Hotel.Hell.S01E01.720p.DD5.1.448kbps-ALANiS +: title: Hotel Hell + season: 1 + episode: 1 + screen_size: 720p + audio_codec: Dolby Digital + audio_channels: '5.1' + audio_bit_rate: 448Kbps + release_group: ALANiS + type: episode + +? 
Greys.Anatomy.S07D1.NTSC.DVDR-ToF +: title: Greys Anatomy + season: 7 + disc: 1 + other: NTSC + source: DVD + release_group: ToF + type: episode + +? Greys.Anatomy.S07D1.NTSC.DVDR-ToF +: title: Greys Anatomy + season: 7 + disc: 1 + other: NTSC + source: DVD + release_group: ToF + type: episode + +? Greys.Anatomy.S07D1-3&5.NTSC.DVDR-ToF +: title: Greys Anatomy + season: 7 + disc: [1, 2, 3, 5] + other: NTSC + source: DVD + release_group: ToF + type: episode + +? El.Principe.2014.S01D01.SPANiSH.COMPLETE.BLURAY-COJONUDO +: title: El Principe + year: 2014 + season: 1 + disc: 1 + language: spa + other: Complete + source: Blu-ray + release_group: COJONUDO + type: episode + +? The Simpsons - Season 2 Complete [DVDRIP VP7 KEGGERMAN +: title: The Simpsons + season: 2 + other: [Complete, Rip] + source: DVD + video_codec: VP7 + release_group: KEGGERMAN + type: episode + +? Barney & Friends_ Easy as ABC (Season 9_ Episode 15)_VP8_Vorbis_360p.webm +: title: Barney & Friends Easy as ABC + season: 9 + episode: 15 + video_codec: VP8 + audio_codec: Vorbis + screen_size: 360p + container: webm + type: episode + +? Victoria.S01.1080p.BluRay.HEVC.DTSMA.LPCM.PGS-OZM +: title: Victoria + season: 1 + screen_size: 1080p + source: Blu-ray + video_codec: H.265 + audio_codec: [DTS-HD, LPCM] + audio_profile: Master Audio + # Is it worth adding subtitle_format? Such a rare case + # subtitle_format: PGS + # release_group: OZM + type: episode + +? The.Prisoners.S01E03.1080p.DM.AAC2.0.x264-BTN +: title: The Prisoners + season: 1 + episode: 3 + screen_size: 1080p + source: Digital Master + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTN + type: episode + +? Panorama.S2013E25.Broken.by.Battle.1080p.DM.AAC2.0.x264-BTN +: title: Panorama + season: 2013 + episode: 25 + episode_title: Broken by Battle + screen_size: 1080p + source: Digital Master + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTN + type: episode + +? 
Our.World.S2014E11.Chinas.Model.Army.720p.DM.AAC2.0.x264-BTN +: title: Our World + season: 2014 + episode: 11 + episode_title: Chinas Model Army + screen_size: 720p + source: Digital Master + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTN + type: episode + +? Storyville.S2016E08.My.Nazi.Legacy.1080p.DM.x264-BTN +: title: Storyville + season: 2016 + episode: 8 + episode_title: My Nazi Legacy + screen_size: 1080p + source: Digital Master + video_codec: H.264 + release_group: BTN + type: episode + +? Comedians.in.Cars.Getting.Coffee.S07E01.1080p.DM.FLAC2.0.x264-NTb +: title: Comedians in Cars Getting Coffee + season: 7 + episode: 1 + screen_size: 1080p + source: Digital Master + audio_codec: FLAC + audio_channels: '2.0' + video_codec: H.264 + release_group: NTb + type: episode + +? "[SomeGroup-Fansub]_Show_Name_727_[VOSTFR][HD_1280x720]" +: release_group: SomeGroup-Fansub + title: Show Name + episode: 727 + subtitle_language: fr + other: HD + screen_size: 720p + type: episode + +? "[GROUP]Show_Name_726_[VOSTFR]_[V1]_[8bit]_[720p]_[2F7B3FA2]" +: release_group: GROUP + title: Show Name + episode: 726 + subtitle_language: fr + version: 1 + color_depth: 8-bit + screen_size: 720p + crc32: 2F7B3FA2 + type: episode + +? Show Name 445 VOSTFR par Fansub-Resistance (1280*720) - version MQ +: title: Show Name + episode: 445 + subtitle_language: fr + screen_size: 720p + type: episode + +? Anime Show Episode 159 v2 [VOSTFR][720p][AAC].mp4 +: title: Anime Show + episode: 159 + version: 2 + subtitle_language: fr + screen_size: 720p + audio_codec: AAC + container: mp4 + type: episode + +? "[Group] Anime Super Episode 161 [VOSTFR][720p].mp4" +: release_group: Group + title: Anime Super + episode: 161 + subtitle_language: fr + screen_size: 720p + container: mp4 + type: episode + +? 
Anime Show Episode 59 v2 [VOSTFR][720p][AAC].mp4 +: title: Anime Show + episode: 59 + version: 2 + subtitle_language: fr + screen_size: 720p + audio_codec: AAC + container: mp4 + type: episode + +? Show.Name.-.476-479.(2007).[HorribleSubs][WEBRip]..[HD.720p] +: title: Show Name + episode: [476, 477, 478, 479] + year: 2007 + release_group: HorribleSubs + source: Web + other: [Rip, HD] + screen_size: 720p + type: episode + +? Show Name - 722 [HD_1280x720].mp4 +: title: Show Name + episode: 722 + other: HD + screen_size: 720p + container: mp4 + type: episode + +? Show!.Name.2.-.10.(2016).[HorribleSubs][WEBRip]..[HD.720p] +: title: Show! Name 2 + episode: 10 + year: 2016 + release_group: HorribleSubs + source: Web + other: [Rip, HD] + screen_size: 720p + type: episode + +? 'C:\folder\[GROUP]_An_Anime_Show_100_-_10_[1080p]_mkv' +: options: -T 'An Anime Show 100' + release_group: GROUP + title: An Anime Show 100 + episode: 10 + screen_size: 1080p + container: mkv + type: episode + +? "[Group].Show.Name!.Super!!.-.05.[720p][AAC].mp4" +: release_group: Group + title: Show Name! Super!! + episode: 5 + screen_size: 720p + audio_codec: AAC + container: mp4 + type: episode + +? "[GROUP].Mobile.Suit.Gundam.Unicorn.RE.0096.-.14.[720p].mkv" +: options: -T 'Mobile Suit Gundam Unicorn RE 0096' + release_group: GROUP + title: Mobile Suit Gundam Unicorn RE 0096 + episode: 14 + screen_size: 720p + container: mkv + type: episode + +? Show.Name.-.Other Name.-.02.(1280x720.HEVC.AAC) +: title: Show Name + alternative_title: Other Name + episode: 2 + screen_size: 720p + video_codec: H.265 + audio_codec: AAC + type: episode + +? "[GroupName].Show.Name.-.02.5.(Special).[BD.1080p]" +: release_group: GroupName + title: Show Name + episode: 2 + episode_details: Special + screen_size: 1080p + source: Blu-ray + type: episode + +? "[Group].Show.Name.2.The.Big.Show.-.11.[1080p]" +: title: Show Name 2 The Big Show + episode: 11 + screen_size: 1080p + type: episode + +? 
"[SuperGroup].Show.Name.-.Still.Name.-.11.[1080p]" +: release_group: SuperGroup + title: Show Name + alternative_title: Still Name + episode: 11 + screen_size: 1080p + type: episode + +? "[SuperGroup].Show.Name.-.462" +: release_group: SuperGroup + title: Show Name + episode: 462 + type: episode + +? Show.Name.10.720p +: title: Show Name + episode: 10 + screen_size: 720p + type: episode + +? "[Group].Show.Name.G2.-.19.[1080p]" +: release_group: Group + title: Show Name G2 + episode: 19 + screen_size: 1080p + type: episode + +? "[Group].Show.Name.S2.-.19.[1080p]" +? /Show.Name.S2/[Group].Show.Name.S2.-.19.[1080p] +? /Show Name S2/[Group].Show.Name.S2.-.19.[1080p] +: options: -T 'Show Name S2' + release_group: Group + title: Show Name S2 + episode: 19 + screen_size: 1080p + type: episode + +? "[ABC]_Show_Name_001.mkv" +: release_group: ABC + title: Show Name + episode: 1 + container: mkv + type: episode + +? 003-005. Show Name - Ep Name.mkv +: episode: [3, 4, 5] + title: Show Name + episode_title: Ep Name + container: mkv + type: episode + +? 003. Show Name - Ep Name.mkv +: episode: 3 + title: Show Name + episode_title: Ep Name + container: mkv + type: episode + +? 165.Show Name.s08e014 +: absolute_episode: 165 + title: Show Name + season: 8 + episode: 14 + type: episode + +? Show Name - 16x03-05 - 313-315 +? Show.Name.16x03-05.313-315-GROUP +? Show Name 16x03-05 313-315 +? Show Name - 313-315 - s16e03-05 +? Show.Name.313-315.s16e03-05 +? Show Name 313-315 s16e03-05 +: title: Show Name + absolute_episode: [313, 314, 315] + season: 16 + episode: [3, 4, 5] + type: episode + +? Show Name 13-16 +: title: Show Name + episode: [13, 14, 15, 16] + type: episode + +? Show Name 804 vostfr HD +: options: --episode-prefer-number + title: Show Name + episode: 804 + subtitle_language: fr + other: HD + type: episode + +? 
"[Doki] Re Zero kara Hajimeru Isekai Seikatsu - 01 1920x1080 Hi10P BD FLAC [7F64383D].mkv" +: release_group: Doki + title: Re Zero kara Hajimeru Isekai Seikatsu + episode: 1 + screen_size: 1080p + aspect_ratio: 1.778 + video_profile: High 10 + color_depth: 10-bit + source: Blu-ray + audio_codec: FLAC + crc32: 7F64383D + container: mkv + type: episode + +? Shark Tank (AU) - S02E01 - HDTV-720p.mkv +: title: Shark Tank + country: AU + season: 2 + episode: 1 + source: HDTV + screen_size: 720p + container: mkv + type: episode + +? "[HorribleSubs] Garo - Vanishing Line - 01 [1080p].mkv" +: release_group: HorribleSubs + title: Garo + alternative_title: Vanishing Line + episode: 1 + screen_size: 1080p + container: mkv + type: episode + +? "[HorribleSubs] Yowamushi Pedal - Glory Line - 01 [1080p].mkv" +: release_group: HorribleSubs + title: Yowamushi Pedal + alternative_title: Glory Line + episode: 1 + screen_size: 1080p + container: mkv + type: episode + +? c:\Temp\autosubliminal\completed\2 Broke Girls\Season 01\2 Broke Girls - S01E01 - HDTV-720p Proper - x264 AC3 - IMMERSE - [2011-09-19].mkv +: title: 2 Broke Girls + season: 1 + episode: 1 + source: HDTV + screen_size: 720p + other: Proper + video_codec: H.264 + audio_codec: Dolby Digital + release_group: IMMERSE + date: 2011-09-19 + container: mkv + type: episode + +? c:\Temp\postprocessing\Marvels.Agents.of.S.H.I.E.L.D.s01e02.0.8.4.720p.WEB.DL.mkv +: title: Marvels Agents of S.H.I.E.L.D. + season: 1 + episode: 2 + episode_title: 0.8.4. + screen_size: 720p + source: Web + container: mkv + type: episode + +? Mind.Field.S02E06.The.Power.of.Suggestion.1440p.H264.WEBDL.Subtitles +: title: Mind Field + season: 2 + episode: 6 + episode_title: The Power of Suggestion + screen_size: 1440p + video_codec: H.264 + source: Web + subtitle_language: und + type: episode + +? 
The Power of Suggestion - Mind Field S2 (Ep 6) (1440p_24fps_H264-384kbit_AAC 6Ch).mp4 +: title: The Power of Suggestion + alternative_title: Mind Field + season: 2 + episode: 6 + screen_size: 1440p + frame_rate: 24fps + video_codec: H.264 + audio_bit_rate: 384Kbps + audio_codec: AAC + audio_channels: '5.1' + container: mp4 + type: episode + +? Mind.Field.S02E06.The.Power.of.Suggestion.1440p.H264.WEBDL.Subtitles/The Power of Suggestion - Mind Field S2 (Ep 6) (1440p_24fps_H264-384kbit_AAC 6Ch).mp4 +: season: 2 + episode: 6 + title: The Power of Suggestion + alternative_title: Mind Field + screen_size: 1440p + frame_rate: 24fps + video_codec: H.264 + source: Web + subtitle_language: und + audio_bit_rate: 384Kbps + audio_codec: AAC + audio_channels: '5.1' + container: mp4 + type: episode + +? Mind.Field.S02E06.The.Power.of.Suggestion.1440p.H264.WEBDL.Subtitles/The Power of Suggestion - Mind Field S2 (Ep 6) (English).srt +: title: Mind Field + season: 2 + episode: 6 + episode_title: The Power of Suggestion + screen_size: 1440p + video_codec: H.264 + source: Web + subtitle_language: en + container: srt + type: episode + +? Mind.Field.S02E06.The.Power.of.Suggestion.1440p.H264.WEBDL.Subtitles/The Power of Suggestion - Mind Field S2 (Ep 6) (Korean).srt +: title: Mind Field + season: 2 + episode: 6 + episode_title: The Power of Suggestion + screen_size: 1440p + video_codec: H.264 + source: Web + subtitle_language: ko + container: srt + type: episode + +? '[HorribleSubs] Overlord II - 01 [1080p] 19.1mbits - 120fps.mkv' +: release_group: HorribleSubs + title: Overlord II + episode: 1 + screen_size: 1080p + video_bit_rate: 19.1Mbps + frame_rate: 120fps + container: mkv + type: episode + +? One Piece - 720 +: title: One Piece + season: 7 + episode: 20 + type: episode + +? foobar.213.avi +: options: -E + title: foobar + episode: 213 + container: avi + type: episode + +? 
FooBar - 360 368p-Grp +: options: -E + title: FooBar + episode: 360 + screen_size: 368p + release_group: Grp + type: episode + +? wwiis.most.daring.raids.s01e04.storming.mussolinis.island.1080p.web.h.264-edhd-sample.mkv +: title: wwiis most daring raids + season: 1 + episode: 4 + episode_title: storming mussolinis island + screen_size: 1080p + source: Web + video_codec: H.264 + release_group: edhd + other: Sample + container: mkv + type: episode + +? WWIIs.Most.Daring.Raids.S01E04.Storming.Mussolinis.Island.1080p.WEB.h264-EDHD/wwiis.most.daring.raids.s01e04.storming.mussolinis.island.1080p.web.h.264-edhd-sample.mkv +: title: wwiis most daring raids + season: 1 + episode: 4 + episode_title: Storming Mussolinis Island + screen_size: 1080p + source: Web + video_codec: H.264 + release_group: edhd + other: Sample + container: mkv + type: episode + +? dcs.legends.of.tomorrow.s02e01.1080p.bluray.x264-rovers.proof +: title: dcs legends of tomorrow + season: 2 + episode: 1 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: rovers + other: Proof + type: episode + +? dcs.legends.of.tomorrow.s02e01.720p.bluray.x264-demand.sample.mkv +: title: dcs legends of tomorrow + season: 2 + episode: 1 + screen_size: 720p + source: Blu-ray + video_codec: H.264 + release_group: demand + other: Sample + container: mkv + type: episode + +? Season 06/e01.1080p.bluray.x264-wavey-obfuscated.mkv +: season: 6 + episode: 1 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + title: wavey + other: Obfuscated + container: mkv + type: episode + +? Hells.Kitchen.US.S17E08.1080p.HEVC.x265-MeGusta-Obfuscated/c48db7d2aeb040e8a920a9fd6effcbf4.mkv +: title: Hells Kitchen + country: US + season: 17 + episode: 8 + screen_size: 1080p + video_codec: H.265 + release_group: MeGusta + other: Obfuscated + uuid: c48db7d2aeb040e8a920a9fd6effcbf4 + container: mkv + type: episode + +? 
Blue.Bloods.S08E09.1080p.HEVC.x265-MeGusta-Obfuscated/afaae96ae7a140e0981ced2a79221751.mkv +: title: Blue Bloods + season: 8 + episode: 9 + screen_size: 1080p + video_codec: H.265 + release_group: MeGusta + other: Obfuscated + container: mkv + type: episode + +? MacGyver.2016.S02E09.CD-ROM.and.Hoagie.Foil.1080p.AMZN.WEBRip.DDP5.1.x264-NTb-Scrambled/c329b27187d44a94b4a25b21502db552.mkv +: title: MacGyver + year: 2016 + season: 2 + episode: 9 + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + other: [Rip, Obfuscated] + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + release_group: NTb + uuid: c329b27187d44a94b4a25b21502db552 + container: mkv + type: episode + +? The.Late.Late.Show.with.James.Corden.2017.11.27.Armie.Hammer.Juno.Temple.Charlie.Puth.1080p.AMZN.WEB-DL.DDP2.0.H.264-monkee-Scrambled/42e7e8a48eb7454aaebebcf49705ce41.mkv +: title: The Late Late Show with James Corden + date: 2017-11-27 + episode_title: Armie Hammer Juno Temple Charlie Puth + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + audio_codec: Dolby Digital Plus + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + other: Obfuscated + uuid: 42e7e8a48eb7454aaebebcf49705ce41 + container: mkv + type: episode + +? Educating Greater Manchester S01E07 720p HDTV x264-PLUTONiUM-AsRequested +: title: Educating Greater Manchester + season: 1 + episode: 7 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: PLUTONiUM + other: Repost + type: episode + +? Im A Celebrity Get Me Out Of Here S17E14 HDTV x264-PLUTONiUM-xpost +: title: Im A Celebrity Get Me Out Of Here + season: 17 + episode: 14 + source: HDTV + video_codec: H.264 + release_group: PLUTONiUM + other: Repost + type: episode + +? 
Tales S01E08 All I Need Method Man Featuring Mary J Blige 720p BET WEBRip AAC2 0 x264-RTN-xpost +: title: Tales + season: 1 + episode: 8 + episode_title: All I Need Method Man Featuring Mary J Blige + screen_size: 720p + source: Web + other: [Rip, Repost] + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: RTN + type: episode + +? This is Us S01E11 Herzensangelegenheiten German DL WS DVDRip x264-CDP-xpost +: options: --exclude country + title: This is Us + season: 1 + episode: 11 + episode_title: Herzensangelegenheiten + language: + - de + - mul + other: + - Widescreen + - Rip + - Repost + source: DVD + video_codec: H.264 + release_group: CDP + type: episode + +? The Girlfriend Experience S02E10 1080p WEB H264-STRiFE-postbot +: title: The Girlfriend Experience + season: 2 + episode: 10 + screen_size: 1080p + source: Web + video_codec: H.264 + release_group: STRiFE + other: Repost + type: episode + +? The.Girlfriend.Experience.S02E10.1080p.WEB.H264-STRiFE-postbot/90550c1adaf44c47b60d24f59603bb98.mkv +: title: The Girlfriend Experience + season: 2 + episode: 10 + screen_size: 1080p + source: Web + video_codec: H.264 + release_group: STRiFE + other: Repost + uuid: 90550c1adaf44c47b60d24f59603bb98 + container: mkv + type: episode + +? 24.S01E02.1080p.BluRay.REMUX.AVC.DD.2.0-EPSiLON-xpost/eb518eaf33f641a1a8c6e0973a67aec2.mkv +: title: '24' + season: 1 + episode: 2 + screen_size: 1080p + source: Blu-ray + other: [Remux, Repost] + video_codec: H.264 + audio_codec: Dolby Digital + audio_channels: '2.0' + release_group: EPSiLON + uuid: eb518eaf33f641a1a8c6e0973a67aec2 + container: mkv + type: episode + +? 
Educating.Greater.Manchester.S01E02.720p.HDTV.x264-PLUTONiUM-AsRequested/47fbcb2393aa4b5cbbb340d3173ca1a9.mkv +: title: Educating Greater Manchester + season: 1 + episode: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: PLUTONiUM + other: Repost + uuid: 47fbcb2393aa4b5cbbb340d3173ca1a9 + container: mkv + type: episode + +? Stranger.Things.S02E05.Chapter.Five.Dig.Dug.720p.NF.WEBRip.DD5.1.x264-PSYPHER-AsRequested-Obfuscated +: title: Stranger Things + season: 2 + episode: 5 + episode_title: Chapter Five Dig Dug + screen_size: 720p + streaming_service: Netflix + source: Web + other: [Rip, Repost, Obfuscated] + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: PSYPHER + type: episode + +? Show.Name.-.Season.1.3.4-.Mp4.1080p +: title: Show Name + season: [1, 3, 4] + container: mp4 + screen_size: 1080p + type: episode + +? Bones.S03.720p.HDTV.x264-SCENE +: title: Bones + season: 3 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: SCENE + type: episode + +? shes.gotta.have.it.s01e08.720p.web.x264-strife.mkv +: title: shes gotta have it + season: 1 + episode: 8 + screen_size: 720p + source: Web + video_codec: H.264 + release_group: strife + type: episode + +? DuckTales.2017.S01E10.The.Missing.Links.of.Moorshire.PDTV.H.264.MP2-KIDKAT +: title: DuckTales + year: 2017 + season: 1 + episode: 10 + episode_title: The Missing Links of Moorshire + source: Digital TV + video_codec: H.264 + audio_codec: MP2 + release_group: KIDKAT + type: episode + +? Por Trece Razones - Temporada 2 [HDTV 720p][Cap.201][AC3 5.1 Castellano]/Por Trece Razones 2x01 [des202].mkv +: title: Por Trece Razones + season: 2 + source: HDTV + screen_size: 720p + episode: 1 + audio_codec: Dolby Digital + audio_channels: '5.1' + language: Catalan + release_group: des202 + container: mkv + type: episode + +? 
Cuerpo de Elite - Temporada 1 [HDTV 720p][Cap.113][AC3 5.1 Esp Castellano]\CuerpoDeElite720p_113_desca202.mkv +: title: Cuerpo de Elite + season: 1 + source: HDTV + screen_size: 720p + episode: 13 + audio_codec: Dolby Digital + audio_channels: '5.1' + language: + - Spanish + - Catalan + container: mkv + type: episode + +? Show.Name.S01E01.St.Patricks.Day.1080p.mkv +: title: Show Name + season: 1 + episode: 1 + episode_title: St Patricks Day + screen_size: 1080p + container: mkv + type: episode + +? Show.Name.S01E01.St.Patricks.Day.1080p-grp.mkv +: title: Show Name + season: 1 + episode: 1 + episode_title: St Patricks Day + screen_size: 1080p + release_group: grp + container: mkv + type: episode + +? Titans.2018.S01E09.Hank.And.Dawn.720p.DCU.WEB-DL.AAC2.0.H264-NTb +: title: Titans + year: 2018 + season: 1 + episode: 9 + episode_title: Hank And Dawn + screen_size: 720p + streaming_service: DC Universe + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: NTb + type: episode + +? S.W.A.T.2017.S01E21.Treibjagd.German.Dubbed.DL.AmazonHD.x264-TVS +: title: S.W.A.T. + year: 2017 + season: 1 + episode: 21 + episode_title: Treibjagd + language: + - German + - Multi + streaming_service: Amazon Prime + other: HD + video_codec: H.264 + release_group: TVS + type: episode + +? S.W.A.T.2017.S01E16.READNFO.720p.HDTV.x264-KILLERS +: title: S.W.A.T. + year: 2017 + season: 1 + episode: 16 + other: Read NFO + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: KILLERS + type: episode + +? /mnt/NAS/NoSubsTVShows/Babylon 5/Season 01/Ep. 02 - Soul Hunter +: title: Babylon 5 + season: 1 + episode: 2 + episode_title: Soul Hunter + type: episode + +? This.is.Us.S01E01.HDTV.x264-KILLERS.mkv +: title: This is Us + season: 1 + episode: 1 + source: HDTV + video_codec: H.264 + release_group: KILLERS + container: mkv + type: episode + +? 
Videos/Office1080/The Office (US) (2005) Season 2 S02 + Extras (1080p AMZN WEB-DL x265 HEVC 10bit AAC 2.0 LION)/The Office (US) (2005) - S02E12 - The Injury (1080p AMZN WEB-DL x265 LION).mkv +: title: The Office + country: US + year: 2005 + season: 2 + other: Extras + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + video_codec: H.265 + video_profile: High Efficiency Video Coding + color_depth: 10-bit + audio_codec: AAC + audio_channels: '2.0' + release_group: LION + episode: 12 + episode_title: The Injury + container: mkv + type: episode + +? Thumping.Spike.2.E01.DF.WEBRip.720p-DRAMATV.mp4 +: title: Thumping Spike 2 + episode: 1 + source: Web + other: Rip + screen_size: 720p + streaming_service: DramaFever + release_group: DRAMATV + container: mp4 + mimetype: video/mp4 + type: episode + +? About.Time.E01.1080p.VIKI.WEB-DL-BLUEBERRY.mp4 +: title: About Time + episode: 1 + screen_size: 1080p + streaming_service: Viki + source: Web + release_group: BLUEBERRY + container: mp4 + mimetype: video/mp4 + type: episode + +? Eyes.Of.Dawn.1991.E01.480p.MBCVOD.AAC.x264-NOGPR.mp4 +: title: Eyes Of Dawn + year: 1991 + season: 1991 + episode: 1 + screen_size: 480p + streaming_service: MBC + audio_codec: AAC + video_codec: H.264 + release_group: NOGPR + container: mp4 + mimetype: video/mp4 + type: episode \ No newline at end of file diff --git a/lib/guessit/test/movies.yml b/lib/guessit/test/movies.yml new file mode 100644 index 00000000..a534ca0f --- /dev/null +++ b/lib/guessit/test/movies.yml @@ -0,0 +1,1786 @@ +? __default__ +: type: movie + +? Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv +: title: Fear and Loathing in Las Vegas + year: 1998 + screen_size: 720p + source: HD-DVD + audio_codec: DTS + video_codec: H.264 + container: mkv + release_group: ESiR + +? 
Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi +: title: El Dia de la Bestia + year: 1995 + source: DVD + other: Rip + language: spanish + video_codec: DivX + release_group: Artik[SEDG] + container: avi + +? Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv +: title: Dark City + year: 1998 + source: Blu-ray + other: Rip + screen_size: 720p + audio_codec: DTS + video_codec: H.264 + release_group: CHD + +? Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv +: title: Sin City + year: 2005 + source: Blu-ray + other: Rip + screen_size: 720p + video_codec: H.264 + audio_codec: Dolby Digital + release_group: SEPTiC + +? Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi +: title: Borat + year: 2006 + proper_count: 2 + source: DVD + other: [ Region 5, Proper, Rip ] + video_codec: Xvid + release_group: PUKKA + +? "[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv" +: title: Le Prestige + source: DVD + other: Rip + video_codec: H.264 + video_profile: High + audio_codec: AAC + audio_profile: High Efficiency + language: [ french, english ] + subtitle_language: [ french, english ] + release_group: Chaps + +? Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi +: title: Battle Royale + year: 2000 + edition: Special + cd: 1 + cd_count: 2 + source: DVD + other: Rip + video_codec: Xvid + release_group: ZeaL + +? Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.avi +: title: Brazil + edition: Criterion + year: 1985 + cd: 2 + +? Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv +: title: Persepolis + year: 2007 + video_codec: H.264 + audio_codec: AAC + language: [ French, English ] + subtitle_language: [ French, English ] + release_group: Ind + +? 
Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv +: title: Toy Story + year: 1995 + source: HDTV + screen_size: 720p + language: [ english, spanish ] + +? Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi +: title: Office Space + year: 1999 + other: [Dual Audio, Rip] + source: DVD + language: [ english, spanish ] + video_codec: Xvid + audio_codec: Dolby Digital + +? Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.avi +: title: Wild Zero + year: 2000 + video_codec: DivX + release_group: EPiC + +? movies/Baraka_Edition_Collector.avi +: title: Baraka + edition: Collector + +? Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director's.Cut).CD1.DVDRip.XviD.AC3-WAF.avi +: title: Blade Runner + year: 1982 + edition: Director's Cut + cd: 1 + source: DVD + other: Rip + video_codec: Xvid + audio_codec: Dolby Digital + release_group: WAF + +? movies/American.The.Bill.Hicks.Story.2009.DVDRip.XviD-EPiSODE.[UsaBit.com]/UsaBit.com_esd-americanbh.avi +: title: American The Bill Hicks Story + year: 2009 + source: DVD + other: Rip + video_codec: Xvid + release_group: EPiSODE + website: UsaBit.com + +? movies/Charlie.And.Boots.DVDRip.XviD-TheWretched/wthd-cab.avi +: title: Charlie And Boots + source: DVD + other: Rip + video_codec: Xvid + release_group: TheWretched + +? movies/Steig Larsson Millenium Trilogy (2009) BRrip 720 AAC x264/(1)The Girl With The Dragon Tattoo (2009) BRrip 720 AAC x264.mkv +: title: The Girl With The Dragon Tattoo + #film_title: Steig Larsson Millenium Trilogy + #film: 1 + year: 2009 + source: Blu-ray + other: [Reencoded, Rip] + audio_codec: AAC + video_codec: H.264 + screen_size: 720p + +? movies/Greenberg.REPACK.LiMiTED.DVDRip.XviD-ARROW/arw-repack-greenberg.dvdrip.xvid.avi +: title: Greenberg + source: DVD + video_codec: Xvid + release_group: ARROW + other: [Proper, Rip] + edition: Limited + proper_count: 1 + +? 
Movies/Fr - Paris 2054, Renaissance (2005) - De Christian Volckman - (Film Divx Science Fiction Fantastique Thriller Policier N&B).avi +: title: Paris 2054, Renaissance + year: 2005 + language: french + video_codec: DivX + +? Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi +: title: Avida + year: 2006 + language: french + source: DVD + other: Rip + video_codec: Xvid + release_group: PROD + +? Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi +: title: Alice in Wonderland + source: DVD + other: Rip + video_codec: Xvid + release_group: DiAMOND + +? Movies/Ne.Le.Dis.A.Personne.Fr 2 cd/personnea_mp.avi +: title: Ne Le Dis A Personne + language: french + cd_count: 2 + +? Movies/Bunker Palace Hôtel (Enki Bilal) (1989)/Enki Bilal - Bunker Palace Hotel (Fr Vhs Rip).avi +: title: Bunker Palace Hôtel + year: 1989 + language: french + source: VHS + other: Rip + +? Movies/21 (2008)/21.(2008).DVDRip.x264.AC3-FtS.[sharethefiles.com].mkv +: title: "21" + year: 2008 + source: DVD + other: Rip + video_codec: H.264 + audio_codec: Dolby Digital + release_group: FtS + website: sharethefiles.com + +? Movies/9 (2009)/9.2009.Blu-ray.DTS.720p.x264.HDBRiSe.[sharethefiles.com].mkv +: title: "9" + year: 2009 + source: Blu-ray + audio_codec: DTS + screen_size: 720p + video_codec: H.264 + release_group: HDBRiSe + website: sharethefiles.com + +? Movies/Mamma.Mia.2008.DVDRip.AC3.XviD-CrazyTeam/Mamma.Mia.2008.DVDRip.AC3.XviD-CrazyTeam.avi +: title: Mamma Mia + year: 2008 + source: DVD + other: Rip + audio_codec: Dolby Digital + video_codec: Xvid + release_group: CrazyTeam + +? Movies/M.A.S.H. (1970)/MASH.(1970).[Divx.5.02][Dual-Subtitulos][DVDRip].ogm +: title: MASH + year: 1970 + video_codec: DivX + source: DVD + other: [Dual Audio, Rip] + +? 
Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv +: title: The Doors + year: 1991 + date: 2008-03-09 + source: Blu-ray + other: Rip + screen_size: 720p + audio_codec: Dolby Digital + video_codec: H.264 + release_group: HiS@SiLUHD + language: english + website: sharethefiles.com + +? Movies/The Doors (1991)/08.03.09.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv +: options: --date-year-first + title: The Doors + year: 1991 + date: 2008-03-09 + source: Blu-ray + other: Rip + screen_size: 720p + audio_codec: Dolby Digital + video_codec: H.264 + release_group: HiS@SiLUHD + language: english + website: sharethefiles.com + +? Movies/Ratatouille/video_ts-ratatouille.srt +: title: Ratatouille + source: DVD + +# Removing this one because 001 is guessed as an episode number. +# ? Movies/001 __ A classer/Fantomas se déchaine - Louis de Funès.avi +# : title: Fantomas se déchaine + +? Movies/Comme une Image (2004)/Comme.Une.Image.FRENCH.DVDRiP.XViD-NTK.par-www.divx-overnet.com.avi +: title: Comme une Image + year: 2004 + language: french + source: DVD + other: Rip + video_codec: Xvid + release_group: NTK + website: www.divx-overnet.com + +? Movies/Fantastic Mr Fox/Fantastic.Mr.Fox.2009.DVDRip.{x264+LC-AAC.5.1}{Fr-Eng}{Sub.Fr-Eng}-™.[sharethefiles.com].mkv +: title: Fantastic Mr Fox + year: 2009 + source: DVD + other: Rip + video_codec: H.264 + audio_codec: AAC + audio_profile: Low Complexity + audio_channels: "5.1" + language: [ french, english ] + subtitle_language: [ french, english ] + website: sharethefiles.com + +? Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi +: title: Somewhere + year: 2010 + source: DVD + other: Rip + video_codec: Xvid + release_group: iLG + +? Movies/Moon_(2009).mkv +: title: Moon + year: 2009 + +? Movies/Moon_(2009)-x02-Making_Of.mkv +: title: Moon + year: 2009 + bonus: 2 + bonus_title: Making Of + +? 
movies/James_Bond-f17-Goldeneye.mkv +: title: Goldeneye + film_title: James Bond + film: 17 + + +? /movies/James_Bond-f21-Casino_Royale.mkv +: title: Casino Royale + film_title: James Bond + film: 21 + +? /movies/James_Bond-f21-Casino_Royale-x01-Becoming_Bond.mkv +: title: Casino Royale + film_title: James Bond + film: 21 + bonus: 1 + bonus_title: Becoming Bond + +? /movies/James_Bond-f21-Casino_Royale-x02-Stunts.mkv +: title: Casino Royale + film_title: James Bond + film: 21 + bonus: 2 + bonus_title: Stunts + +? OSS_117--Cairo,_Nest_of_Spies.mkv +: title: OSS 117 +# TODO: Implement subTitle for movies. + +? The Godfather Part 3.mkv +? The Godfather Part III.mkv +: title: The Godfather + part: 3 + +? Foobar Part VI.mkv +: title: Foobar + part: 6 + +? The_Insider-(1999)-x02-60_Minutes_Interview-1996.mp4 +: title: The Insider + year: 1999 + bonus: 2 + bonus_title: 60 Minutes Interview-1996 + +? Rush.._Beyond_The_Lighted_Stage-x09-Between_Sun_and_Moon-2002_Hartford.mkv +: title: Rush Beyond The Lighted Stage + bonus: 9 + bonus_title: Between Sun and Moon + year: 2002 + +? /public/uTorrent/Downloads Finished/Movies/Indiana.Jones.and.the.Temple.of.Doom.1984.HDTV.720p.x264.AC3.5.1-REDµX/Indiana.Jones.and.the.Temple.of.Doom.1984.HDTV.720p.x264.AC3.5.1-REDµX.mkv +: title: Indiana Jones and the Temple of Doom + year: 1984 + source: HDTV + screen_size: 720p + video_codec: H.264 + audio_codec: Dolby Digital + audio_channels: "5.1" + release_group: REDµX + +? The.Director’s.Notebook.2006.Blu-Ray.x264.DXVA.720p.AC3-de[42].mkv +: title: The Director’s Notebook + year: 2006 + source: Blu-ray + video_codec: H.264 + video_api: DXVA + screen_size: 720p + audio_codec: Dolby Digital + release_group: de[42] + + +? Movies/Cosmopolis.2012.LiMiTED.720p.BluRay.x264-AN0NYM0US[bb]/ano-cosmo.720p.mkv +: title: Cosmopolis + year: 2012 + screen_size: 720p + video_codec: H.264 + release_group: AN0NYM0US[bb] + source: Blu-ray + edition: Limited + +? 
movies/La Science des Rêves (2006)/La.Science.Des.Reves.FRENCH.DVDRip.XviD-MP-AceBot.avi +: title: La Science des Rêves + year: 2006 + source: DVD + other: Rip + video_codec: Xvid + video_profile: Main + release_group: AceBot + language: French + +? The_Italian_Job.mkv +: title: The Italian Job + +? The.Rum.Diary.2011.1080p.BluRay.DTS.x264.D-Z0N3.mkv +: title: The Rum Diary + year: 2011 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + audio_codec: DTS + release_group: D-Z0N3 + +? Life.Of.Pi.2012.1080p.BluRay.DTS.x264.D-Z0N3.mkv +: title: Life Of Pi + year: 2012 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + audio_codec: DTS + release_group: D-Z0N3 + +? The.Kings.Speech.2010.1080p.BluRay.DTS.x264.D Z0N3.mkv +: title: The Kings Speech + year: 2010 + screen_size: 1080p + source: Blu-ray + audio_codec: DTS + video_codec: H.264 + release_group: D Z0N3 + +? Street.Kings.2008.BluRay.1080p.DTS.x264.dxva EuReKA.mkv +: title: Street Kings + year: 2008 + source: Blu-ray + screen_size: 1080p + audio_codec: DTS + video_codec: H.264 + video_api: DXVA + release_group: EuReKA + +? 2001.A.Space.Odyssey.1968.HDDVD.1080p.DTS.x264.dxva EuReKA.mkv +: title: 2001 A Space Odyssey + year: 1968 + source: HD-DVD + screen_size: 1080p + audio_codec: DTS + video_codec: H.264 + video_api: DXVA + release_group: EuReKA + +? 2012.2009.720p.BluRay.x264.DTS WiKi.mkv +: title: "2012" + year: 2009 + screen_size: 720p + source: Blu-ray + video_codec: H.264 + audio_codec: DTS + release_group: WiKi + +? /share/Download/movie/Dead Man Down (2013) BRRiP XViD DD5_1 Custom NLSubs =-_lt Q_o_Q gt-=_/XD607ebb-BRc59935-5155473f-1c5f49/XD607ebb-BRc59935-5155473f-1c5f49.avi +: title: Dead Man Down + year: 2013 + source: Blu-ray + other: [Reencoded, Rip] + video_codec: Xvid + audio_channels: "5.1" + audio_codec: Dolby Digital + uuid: XD607ebb-BRc59935-5155473f-1c5f49 + +? 
Pacific.Rim.3D.2013.COMPLETE.BLURAY-PCH.avi +: title: Pacific Rim + year: 2013 + source: Blu-ray + other: + - Complete + - 3D + release_group: PCH + +? Immersion.French.2011.STV.READNFO.QC.FRENCH.ENGLISH.NTSC.DVDR.nfo +: title: Immersion French + year: 2011 + language: + - French + - English + source: DVD + other: [Straight to Video, Read NFO, NTSC] + +? Immersion.French.2011.STV.READNFO.QC.FRENCH.NTSC.DVDR.nfo +: title: Immersion French + year: 2011 + language: French + source: DVD + other: [Straight to Video, Read NFO, NTSC] + +? Immersion.French.2011.STV.READNFO.QC.NTSC.DVDR.nfo +: title: Immersion + language: French + year: 2011 + source: DVD + other: [Straight to Video, Read NFO, NTSC] + +? French.Immersion.2011.STV.READNFO.QC.ENGLISH.NTSC.DVDR.nfo +: title: French Immersion + year: 2011 + language: ENGLISH + source: DVD + other: [Straight to Video, Read NFO, NTSC] + +? Howl's_Moving_Castle_(2004)_[720p,HDTV,x264,DTS]-FlexGet.avi +: video_codec: H.264 + source: HDTV + title: Howl's Moving Castle + screen_size: 720p + year: 2004 + audio_codec: DTS + release_group: FlexGet + +? Pirates de langkasuka.2008.FRENCH.1920X1080.h264.AVC.AsiaRa.mkv +: screen_size: 1080p + year: 2008 + language: French + video_codec: H.264 + title: Pirates de langkasuka + release_group: AsiaRa + +? Masala (2013) Telugu Movie HD DVDScr XviD - Exclusive.avi +: year: 2013 + video_codec: Xvid + title: Masala + source: HD-DVD + other: Screener + release_group: Exclusive + +? Django Unchained 2012 DVDSCR X264 AAC-P2P.nfo +: year: 2012 + other: Screener + video_codec: H.264 + title: Django Unchained + audio_codec: AAC + source: DVD + release_group: P2P + +? Ejecutiva.En.Apuros(2009).BLURAY.SCR.Xvid.Spanish.LanzamientosD.nfo +: year: 2009 + other: Screener + source: Blu-ray + video_codec: Xvid + language: Spanish + title: Ejecutiva En Apuros + +? 
Die.Schluempfe.2.German.DL.1080p.BluRay.x264-EXQUiSiTE.mkv +: title: Die Schluempfe 2 + source: Blu-ray + language: + - Multiple languages + - German + video_codec: H.264 + release_group: EXQUiSiTE + screen_size: 1080p + +? Rocky 1976 French SubForced BRRip x264 AC3-FUNKY.mkv +: title: Rocky + year: 1976 + subtitle_language: French + source: Blu-ray + other: [Reencoded, Rip] + video_codec: H.264 + audio_codec: Dolby Digital + release_group: FUNKY + +? REDLINE (BD 1080p H264 10bit FLAC) [3xR].mkv +: title: REDLINE + source: Blu-ray + video_codec: H.264 + color_depth: 10-bit + audio_codec: FLAC + screen_size: 1080p + +? The.Lizzie.McGuire.Movie.(2003).HR.DVDRiP.avi +: title: The Lizzie McGuire Movie + year: 2003 + source: DVD + other: [High Resolution, Rip] + +? Hua.Mulan.BRRIP.MP4.x264.720p-HR.avi +: title: Hua Mulan + video_codec: H.264 + source: Blu-ray + screen_size: 720p + other: [Reencoded, Rip] + release_group: HR + +? Dr.Seuss.The.Lorax.2012.DVDRip.LiNE.XviD.AC3.HQ.Hive-CM8.mp4 +: video_codec: Xvid + title: Dr Seuss The Lorax + source: DVD + other: [Rip, Line Audio] + year: 2012 + audio_codec: Dolby Digital + audio_profile: High Quality + release_group: Hive-CM8 + +? "Star Wars: Episode IV - A New Hope (2004) Special Edition.MKV" +: title: "Star Wars: Episode IV" + alternative_title: A New Hope + year: 2004 + edition: Special + +? Dr.LiNE.The.Lorax.2012.DVDRip.LiNE.XviD.AC3.HQ.Hive-CM8.mp4 +: video_codec: Xvid + title: Dr LiNE The Lorax + source: DVD + other: [Rip, Line Audio] + year: 2012 + audio_codec: Dolby Digital + audio_profile: High Quality + release_group: Hive-CM8 + +? Dr.LiNE.The.Lorax.2012.DVDRip.XviD.AC3.HQ.Hive-CM8.mp4 +: video_codec: Xvid + title: Dr LiNE The Lorax + source: DVD + other: Rip + year: 2012 + audio_codec: Dolby Digital + audio_profile: High Quality + release_group: Hive-CM8 + +? 
Perfect Child-2007-TRUEFRENCH-TVRip.Xvid-h@mster.avi +: release_group: h@mster + title: Perfect Child + video_codec: Xvid + language: French + source: TV + other: Rip + year: 2007 + +? entre.ciel.et.terre.(1994).dvdrip.h264.aac-psypeon.avi +: audio_codec: AAC + source: DVD + other: Rip + release_group: psypeon + title: entre ciel et terre + video_codec: H.264 + year: 1994 + +? Yves.Saint.Laurent.2013.FRENCH.DVDSCR.MD.XviD-ViVARiUM.avi +: source: DVD + language: French + other: [Screener, Mic Dubbed] + release_group: ViVARiUM + title: Yves Saint Laurent + video_codec: Xvid + year: 2013 + +? Echec et Mort - Hard to Kill - Steven Seagal Multi 1080p BluRay x264 CCATS.avi +: source: Blu-ray + language: Multiple languages + release_group: CCATS + screen_size: 1080p + title: Echec et Mort + alternative_title: + - Hard to Kill + - Steven Seagal + video_codec: H.264 + +? Paparazzi - Timsit/Lindon (MKV 1080p tvripHD) +: options: -n + title: Paparazzi + alternative_title: + - Timsit + - Lindon + screen_size: 1080p + container: mkv + source: HDTV + other: Rip + +? some.movie.720p.bluray.x264-mind +: title: some movie + screen_size: 720p + video_codec: H.264 + release_group: mind + source: Blu-ray + +? Dr LiNE The Lorax 720p h264 BluRay +: title: Dr LiNE The Lorax + screen_size: 720p + video_codec: H.264 + source: Blu-ray + +#TODO: Camelcase implementation +#? BeatdownFrenchDVDRip.mkv +#: options: -c +# title: Beatdown +# language: French +# source: DVD + +#? YvesSaintLaurent2013FrenchDVDScrXvid.avi +#: options: -c +# source: DVD +# language: French +# other: Screener +# title: Yves saint laurent +# video_codec: Xvid +# year: 2013 + + +? Elle.s.en.va.720p.mkv +: screen_size: 720p + title: Elle s en va + +? FooBar.7.PDTV-FlexGet +: source: Digital TV + release_group: FlexGet + title: FooBar 7 + +? 
h265 - HEVC Riddick Unrated Director Cut French 1080p DTS.mkv +: audio_codec: DTS + edition: [Unrated, Director's Cut] + language: fr + screen_size: 1080p + title: Riddick + video_codec: H.265 + +? "[h265 - HEVC] Riddick Unrated Director Cut French [1080p DTS].mkv" +: audio_codec: DTS + edition: [Unrated, Director's Cut] + language: fr + screen_size: 1080p + title: Riddick + video_codec: H.265 + +? Barbecue-2014-French-mHD-1080p +: language: fr + other: Micro HD + screen_size: 1080p + title: Barbecue + year: 2014 + +? Underworld Quadrilogie VO+VFF+VFQ 1080p HDlight.x264~Tonyk~Monde Infernal +: language: fr + other: [Original Video, Micro HD] + screen_size: 1080p + title: Underworld Quadrilogie + video_codec: H.264 + +? A Bout Portant (The Killers).PAL.Multi.DVD-R-KZ +: source: DVD + language: mul + release_group: KZ + title: A Bout Portant + +? "Mise à Sac (Alain Cavalier, 1967) [Vhs.Rip.Vff]" +: source: VHS + language: fr + title: "Mise à Sac" + year: 1967 + +? A Bout Portant (The Killers).PAL.Multi.DVD-R-KZ +: source: DVD + other: PAL + language: mul + release_group: KZ + title: A Bout Portant + +? Youth.In.Revolt.(Be.Bad).2009.MULTI.1080p.LAME3*92-MEDIOZZ +: audio_codec: MP3 + language: mul + release_group: MEDIOZZ + screen_size: 1080p + title: Youth In Revolt + year: 2009 + +? La Defense Lincoln (The Lincoln Lawyer) 2011 [DVDRIP][Vostfr] +: source: DVD + other: Rip + subtitle_language: fr + title: La Defense Lincoln + year: 2011 + +? '[h265 - HEVC] Fight Club French 1080p DTS.' +: audio_codec: DTS + language: fr + screen_size: 1080p + title: Fight Club + video_codec: H.265 + +? Love Gourou (Mike Myers) - FR +: language: fr + title: Love Gourou + +? '[h265 - hevc] transformers 2 1080p french ac3 6ch.' +: audio_channels: '5.1' + audio_codec: Dolby Digital + language: fr + screen_size: 1080p + title: transformers 2 + video_codec: H.265 + +? 1.Angry.Man.1957.mkv +: title: 1 Angry Man + year: 1957 + +? 12.Angry.Men.1957.mkv +: title: 12 Angry Men + year: 1957 + +? 
123.Angry.Men.1957.mkv +: title: 123 Angry Men + year: 1957 + +? "Looney Tunes 1444x866 Porky's Last Stand.mkv" +: screen_size: 1444x866 + title: Looney Tunes + +? Das.Appartement.German.AC3D.DL.720p.BluRay.x264-TVP +: audio_codec: Dolby Digital + source: Blu-ray + language: + - German + - Multi + release_group: TVP + screen_size: 720p + title: Das Appartement + type: movie + video_codec: H.264 + +? Das.Appartement.GERMAN.AC3D.DL.720p.BluRay.x264-TVP +: audio_codec: Dolby Digital + source: Blu-ray + language: + - de + - mul + release_group: TVP + screen_size: 720p + title: Das Appartement + video_codec: H.264 + +? Hyena.Road.2015.German.1080p.DL.DTSHD.Bluray.x264-pmHD +: audio_codec: DTS-HD + source: Blu-ray + language: + - de + - mul + release_group: pmHD + screen_size: 1080p + title: Hyena Road + type: movie + video_codec: H.264 + year: 2015 + +? Hyena.Road.2015.German.1080p.DL.DTSHD.Bluray.x264-pmHD +: audio_codec: DTS-HD + source: Blu-ray + language: + - de + - mul + release_group: pmHD + screen_size: 1080p + title: Hyena Road + type: movie + video_codec: H.264 + year: 2015 + +? Name.BDMux.720p +: title: Name + source: Blu-ray + other: Mux + screen_size: 720p + type: movie + +? Name.BRMux.720p +: title: Name + source: Blu-ray + other: [Reencoded, Mux] + screen_size: 720p + type: movie + +? Name.BDRipMux.720p +: title: Name + source: Blu-ray + other: [Rip, Mux] + screen_size: 720p + type: movie + +? Name.BRRipMux.720p +: title: Name + source: Blu-ray + other: [Reencoded, Rip, Mux] + screen_size: 720p + type: movie + +? Secondary Education (2013).mkv +: options: -T Second + title: Secondary Education + year: 2013 + type: movie + +? Mad Max Beyond Thunderdome () +: title: Mad Max Beyond Thunderdome + type: movie + +? 
Hacksaw Ridge 2016 Multi 2160p UHD BluRay Hevc10 HDR10 DTSHD & ATMOS 7.1 -DDR.mkv +: title: Hacksaw Ridge + year: 2016 + language: mul + screen_size: 2160p + source: Ultra HD Blu-ray + video_codec: H.265 + color_depth: 10-bit + audio_codec: [DTS-HD, Dolby Atmos] + audio_channels: '7.1' + release_group: DDR + container: mkv + type: movie + +? Special.Correspondents.2016.iTA.ENG.4K.2160p.NetflixUHD.TeamPremium.mp4 +: title: Special Correspondents + year: 2016 + language: [it, en] + screen_size: 2160p + streaming_service: Netflix + other: Ultra HD + release_group: TeamPremium + container: mp4 + type: movie + +? -Special.Correspondents.2016.iTA.ENG.4K.2160p.NetflixUHD.TeamPremium.mp4 +: alternative_title: 4K + +? -Special.Correspondents.2016.iTA.ENG.4K.2160p.NetflixUHD.TeamPremium.mp4 +: alternative_title: 2160p + +? Suicide Squad EXTENDED (2016) 2160p 4K UltraHD Blu-Ray x265 (HEVC 10bit BT709) Dolby Atmos 7.1 -DDR +: title: Suicide Squad + edition: Extended + year: 2016 + screen_size: 2160p + source: Ultra HD Blu-ray + video_codec: H.265 + color_depth: 10-bit + audio_codec: Dolby Atmos + audio_channels: '7.1' + release_group: DDR + type: movie + +? Queen - A Kind of Magic (Alternative Extended Version) 2CD 2014 +: title: Queen + alternative_title: A Kind of Magic + edition: [Alternative Cut, Extended] + cd_count: 2 + year: 2014 + type: movie + +? Jour.de.Fete.1949.ALTERNATiVE.CUT.1080p.BluRay.x264-SADPANDA[rarbg] +: title: Jour de Fete + year: 1949 + edition: Alternative Cut + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: SADPANDA[rarbg] + +? The.Movie.CONVERT.720p.HDTV.x264-C4TV +: title: The Movie + other: Converted + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: C4TV + type: movie + +? 
Its.A.Wonderful.Life.1946.Colorized.720p.BRRip.999MB.MkvCage.com +: title: Its A Wonderful Life + year: 1946 + other: [Colorized, Reencoded, Rip] + screen_size: 720p + source: Blu-ray + size: 999MB + website: MkvCage.com + type: movie + +? Alien DC (1979) [1080p] +: title: Alien + edition: Director's Cut + year: 1979 + screen_size: 1080p + type: movie + +? Requiem.For.A.Dream.2000.DC.1080p.BluRay.x264.anoXmous +: title: Requiem For A Dream + year: 2000 + edition: Director's Cut + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: anoXmous + type: movie + +? Before.the.Flood.2016.DOCU.1080p.WEBRip.x264.DD5.1-FGT +: title: Before the Flood + year: 2016 + other: [Documentary, Rip] + screen_size: 1080p + source: Web + video_codec: H.264 + audio_codec: Dolby Digital + audio_channels: '5.1' + release_group: FGT + type: movie + +? Zootopia.2016.HDRip.1.46Gb.Dub.MegaPeer +: title: Zootopia + year: 2016 + other: [HD, Rip] + size: 1.46GB + language: und + release_group: MegaPeer + type: movie + +? Suntan.2016.FESTiVAL.DVDRip.x264-IcHoR +: title: Suntan + year: 2016 + edition: Festival + source: DVD + other: Rip + video_codec: H.264 + release_group: IcHoR + type: movie + +? Hardwired.STV.NFOFiX.FRENCH.DVDRiP.XviD-SURViVAL +: title: Hardwired + other: [Straight to Video, Fix, Rip] + language: french + source: DVD + video_codec: Xvid + release_group: SURViVAL + -proper_count: 1 + type: movie + +? Maze.Runner.The.Scorch.Trials.OM.2015.WEB-DLRip.by.Seven +: title: Maze Runner The Scorch Trials + other: [Open Matte, Rip] + year: 2015 + source: Web + release_group: Seven + type: movie + +? Kampen Om Tungtvannet aka The Heavy Water War COMPLETE 720p x265 HEVC-Lund +: title: Kampen Om Tungtvannet aka The Heavy Water War + other: Complete + screen_size: 720p + video_codec: H.265 + release_group: Lund + type: movie + +? 
All.Fall.Down.x264.PROOFFIX-OUTLAWS +: title: All Fall Down + video_codec: H.264 + other: Fix + release_group: OUTLAWS + -proper_count: 1 + type: movie + +? The.Last.Survivors.2014.PROOF.SAMPLE.FiX.BDRip.x264-TOPCAT +: title: The Last Survivors + year: 2014 + other: [Fix, Rip] + source: Blu-ray + video_codec: H.264 + release_group: TOPCAT + type: movie + +? Bad Santa 2 2016 THEATRiCAL FRENCH BDRip XviD-EXTREME +: title: Bad Santa 2 + year: 2016 + edition: Theatrical + language: french + source: Blu-ray + other: Rip + video_codec: Xvid + release_group: EXTREME + type: movie + +? The Lord of the Rings The Fellowship of the Ring THEATRICAL EDITION (2001) [1080p] +: title: The Lord of the Rings The Fellowship of the Ring + edition: Theatrical + year: 2001 + screen_size: 1080p + type: movie + +? World War Z (2013) Theatrical Cut 720p BluRay x264 +: title: World War Z + year: 2013 + edition: Theatrical + screen_size: 720p + source: Blu-ray + video_codec: H.264 + type: movie + +? The Heartbreak Kid (1993) UNCUT 720p WEBRip x264 +: title: The Heartbreak Kid + year: 1993 + edition: Uncut + other: Rip + screen_size: 720p + source: Web + video_codec: H.264 + type: movie + +? Mrs.Doubtfire.1993.720p.OAR.Bluray.DTS.x264-CtrlHD +: title: Mrs Doubtfire + year: 1993 + screen_size: 720p + other: Original Aspect Ratio + source: Blu-ray + audio_codec: DTS + video_codec: H.264 + release_group: CtrlHD + type: movie + +? Aliens.SE.1986.BDRip.1080p +: title: Aliens + edition: Special + year: 1986 + source: Blu-ray + other: Rip + screen_size: 1080p + type: movie + +? 10 Cloverfield Lane.[Blu-Ray 1080p].[MULTI] +: options: --type movie + title: 10 Cloverfield Lane + source: Blu-ray + screen_size: 1080p + language: Multiple languages + type: movie + +? 007.Spectre.[HDTC.MD].[TRUEFRENCH] +: options: --type movie + title: 007 Spectre + source: HD Telecine + language: French + type: movie + +? 
We.Are.X.2016.LIMITED.BDRip.x264-BiPOLAR +: title: We Are X + year: 2016 + edition: Limited + source: Blu-ray + other: Rip + video_codec: H.264 + release_group: BiPOLAR + type: movie + +? The Rack (VHS) [1956] Paul Newman +: title: The Rack + source: VHS + year: 1956 + type: movie + +? Les.Magiciens.1976.VHSRip.XViD.MKO +: title: Les Magiciens + year: 1976 + source: VHS + other: Rip + video_codec: Xvid + release_group: MKO + type: movie + +? The Boss Baby 2017 720p CAM x264 AC3 TiTAN +: title: The Boss Baby + year: 2017 + screen_size: 720p + source: Camera + video_codec: H.264 + audio_codec: Dolby Digital + release_group: TiTAN + type: movie + +? The.Boss.Baby.2017.HDCAM.XviD-MrGrey +: title: The Boss Baby + year: 2017 + source: HD Camera + video_codec: Xvid + release_group: MrGrey + type: movie + +? The Martian 2015 Multi 2160p 4K UHD Bluray HEVC10 SDR DTSHD 7.1 -Zeus +: title: The Martian + year: 2015 + language: mul + screen_size: 2160p + source: Ultra HD Blu-ray + video_codec: H.265 + color_depth: 10-bit + other: Standard Dynamic Range + audio_codec: DTS-HD + audio_channels: '7.1' + release_group: Zeus + type: movie + +? Fantastic Beasts and Where to Find Them 2016 Multi 2160p UHD BluRay HEVC HDR Atmos7.1-DDR +: title: Fantastic Beasts and Where to Find Them + year: 2016 + language: mul + screen_size: 2160p + source: Ultra HD Blu-ray + video_codec: H.265 + other: HDR10 + audio_codec: Dolby Atmos + audio_channels: '7.1' + release_group: DDR + type: movie + +? Life of Pi 2012 2160p 4K BluRay HDR10 HEVC BT2020 DTSHD 7.1 subs -DDR +: title: Life of Pi + year: 2012 + screen_size: 2160p + source: Ultra HD Blu-ray + other: [HDR10, BT.2020] + subtitle_language: und + release_group: DDR + +? 
Captain.America.Civil.War.HDR.1080p.HEVC.10bit.BT.2020.DTS-HD.MA.7.1-VISIONPLUSHDR +: title: Captain America Civil War + other: [HDR10, BT.2020] + screen_size: 1080p + video_codec: H.265 + color_depth: 10-bit + audio_codec: DTS-HD + audio_profile: Master Audio + audio_channels: '7.1' + release_group: VISIONPLUSHDR + type: movie + +? Deadpool.2016.4K.2160p.UHD.HQ.8bit.BluRay.8CH.x265.HEVC-MZABI.mkv +: title: Deadpool + year: 2016 + screen_size: 2160p + source: Ultra HD Blu-ray + other: High Quality + color_depth: 8-bit + audio_channels: '7.1' + video_codec: H.265 + release_group: MZABI + type: movie + +? Fantastic.Beasts.and.Where.to.Find.Them.2016.2160p.4K.UHD.10bit.HDR.BluRay.7.1.x265.HEVC-MZABI.mkv +: title: Fantastic Beasts and Where to Find Them + year: 2016 + screen_size: 2160p + source: Ultra HD Blu-ray + color_depth: 10-bit + other: HDR10 + audio_channels: '7.1' + video_codec: H.265 + release_group: MZABI + container: mkv + type: movie + +? The.Arrival.4K.HDR.HEVC.10bit.BT2020.DTS.HD-MA-MadVR.HDR10.Dolby.Vision-VISIONPLUSHDR1000 +: title: The Arrival + screen_size: 2160p + other: [HDR10, BT.2020, Dolby Vision] + video_codec: H.265 + color_depth: 10-bit + audio_codec: DTS-HD + audio_profile: Master Audio + release_group: VISIONPLUSHDR1000 + type: movie + +? How To Steal A Dog.2014.BluRay.1080p.12bit.HEVC.OPUS 5.1-Hn1Dr2.mkv +: title: How To Steal A Dog + year: 2014 + source: Blu-ray + screen_size: 1080p + color_depth: 12-bit + video_codec: H.265 + audio_codec: Opus + audio_channels: '5.1' + release_group: Hn1Dr2 + container: mkv + type: movie + +? Interstelar.2014.IMAX.RUS.BDRip.x264.-HELLYWOOD.mkv +: title: Interstelar + year: 2014 + edition: IMAX + language: ru + source: Blu-ray + other: Rip + video_codec: H.264 + release_group: HELLYWOOD + container: mkv + type: movie + +? 
The.Dark.Knight.IMAX.EDITION.HQ.BluRay.1080p.x264.AC3.Hindi.Eng.ETRG +: title: The Dark Knight + edition: IMAX + other: High Quality + source: Blu-ray + screen_size: 1080p + video_codec: H.264 + audio_codec: Dolby Digital + language: [hindi, english] + release_group: ETRG + type: movie + +? The.Martian.2015.4K.UHD.UPSCALED-ETRG +: title: The Martian + year: 2015 + screen_size: 2160p + other: [Ultra HD, Upscaled] + release_group: ETRG + type: movie + +? Delibal 2015 720p Upscale DVDRip x264 DD5.1 AC3 +: title: Delibal + year: 2015 + screen_size: 720p + other: [Upscaled, Rip] + source: DVD + video_codec: H.264 + audio_codec: Dolby Digital + audio_channels: '5.1' + type: movie + +? Casablanca [Ultimate Collector's Edition].1942.BRRip.XviD-VLiS +: title: Casablanca + edition: [Ultimate, Collector] + year: 1942 + source: Blu-ray + other: [Reencoded, Rip] + video_codec: Xvid + release_group: VLiS + type: movie + +? Batman V Superman Dawn of Justice 2016 Extended Cut Ultimate Edition HDRip x264 AC3-DaDDy +: title: Batman V Superman Dawn of Justice + year: 2016 + edition: [Extended, Ultimate] + other: [HD, Rip] + video_codec: H.264 + audio_codec: Dolby Digital + release_group: DaDDy + type: movie + +? Stargate SG1 Ultimate Fan Collection +: title: Stargate SG1 + edition: [Ultimate, Fan] + +? The.Jungle.Book.2016.MULTi.1080p.BluRay.x264.DTS-HD.MA.7.1.DTS-HD.HRA.5.1-LeRalou +: title: The Jungle Book + year: 2016 + language: mul + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + audio_codec: DTS-HD + audio_profile: [Master Audio, High Resolution Audio] + audio_channels: ['7.1', '5.1'] + release_group: LeRalou + type: movie + +? Terminus.2015.BluRay.1080p.x264.DTS-HD.HRA.5.1-LTT +: title: Terminus + year: 2015 + source: Blu-ray + screen_size: 1080p + video_codec: H.264 + audio_codec: DTS-HD + audio_profile: High Resolution Audio + audio_channels: '5.1' + release_group: LTT + type: movie + +? 
Ghost.in.the.Shell.1995.1080p.Bluray.DTSES.x264-SHiTSoNy +: title: Ghost in the Shell + year: 1995 + screen_size: 1080p + source: Blu-ray + audio_codec: DTS + audio_profile: Extended Surround + +? The.Boss.Baby.2017.BluRay.1080p.DTS-ES.x264-PRoDJi +: title: The Boss Baby + year: 2017 + source: Blu-ray + screen_size: 1080p + audio_codec: DTS + audio_profile: Extended Surround + video_codec: H.264 + release_group: PRoDJi + type: movie + +? Title.2000.720p.BluRay.DDEX.x264-HDClub.mkv +: title: Title + year: 2000 + screen_size: 720p + source: Blu-ray + audio_codec: Dolby Digital + audio_profile: EX + video_codec: H.264 + release_group: HDClub + container: mkv + type: movie + +? Jack Reacher Never Go Back 2016 720p Bluray DD-EX x264-BluPanther +: title: Jack Reacher Never Go Back + year: 2016 + screen_size: 720p + source: Blu-ray + audio_codec: Dolby Digital + audio_profile: EX + video_codec: H.264 + release_group: BluPanther + type: movie + +? How To Steal A Dog.2014.BluRay.1080p.12bit.HEVC.OPUS 5.1-Hn1Dr2.mkv +: title: How To Steal A Dog + year: 2014 + source: Blu-ray + screen_size: 1080p + color_depth: 12-bit + video_codec: H.265 + audio_codec: Opus + audio_channels: '5.1' + release_group: Hn1Dr2 + container: mkv + type: movie + +? How.To.Be.Single.2016.1080p.BluRay.x264-BLOW/blow-how.to.be.single.2016.1080p.bluray.x264.mkv +: title: How To Be Single + year: 2016 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: BLOW + container: mkv + type: movie + +? After.the.Storm.2016.720p.YIFY +: title: After the Storm + year: 2016 + screen_size: 720p + release_group: YIFY + type: movie + +? Battle Royale 2000 DC (1080p Bluray x265 HEVC 10bit AAC 7.1 Japanese Tigole) +: title: Battle Royale + year: 2000 + edition: Director's Cut + screen_size: 1080p + source: Blu-ray + video_codec: H.265 + color_depth: 10-bit + audio_codec: AAC + audio_channels: '7.1' + language: jp + release_group: Tigole + +? 
Congo.The.Grand.Inga.Project.2013.1080p.BluRay.x264-OBiTS +: title: Congo The Grand Inga Project + year: 2013 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: OBiTS + type: movie + +? Congo.The.Grand.Inga.Project.2013.BRRip.XviD.MP3-RARBG +: title: Congo The Grand Inga Project + year: 2013 + source: Blu-ray + other: [Reencoded, Rip] + video_codec: Xvid + audio_codec: MP3 + release_group: RARBG + type: movie + +? Congo.The.Grand.Inga.Project.2013.720p.BluRay.H264.AAC-RARBG +: title: Congo The Grand Inga Project + year: 2013 + screen_size: 720p + source: Blu-ray + video_codec: H.264 + audio_codec: AAC + release_group: RARBG + type: movie + +? Mit.dem.Bauch.durch.die.Wand.SWiSSGERMAN.DOKU.DVDRiP.x264-DEFLOW +: title: Mit dem Bauch durch die Wand + language: de-CH + other: [Documentary, Rip] + source: DVD + video_codec: H.264 + release_group: DEFLOW + type: movie + +? InDefinitely.Maybe.2008.1080p.EUR.BluRay.VC-1.DTS-HD.MA.5.1-FGT +: title: InDefinitely Maybe + year: 2008 + screen_size: 1080p + source: Blu-ray + video_codec: VC-1 + audio_codec: DTS-HD + audio_profile: Master Audio + audio_channels: '5.1' + release_group: FGT + type: movie + +? Bjyukujyo Kyoushi Kan XXX 720P WEBRIP MP4-GUSH +: title: Bjyukujyo Kyoushi Kan + other: [XXX, Rip] + screen_size: 720p + source: Web + container: mp4 + release_group: GUSH + type: movie + +? The.Man.With.The.Golden.Arm.1955.1080p.BluRay.x264.DTS-FGT +: title: The Man With The Golden Arm + year: 1955 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + audio_codec: DTS + release_group: FGT + type: movie + +? blow-how.to.be.single.2016.1080p.bluray.x264.mkv +: release_group: blow + title: how to be single + year: 2016 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + container: mkv + type: movie + +? 
ulshd-the.right.stuff.1983.multi.1080p.bluray.x264.mkv +: release_group: ulshd + title: the right stuff + year: 1983 + language: mul + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + container: mkv + type: movie + +? FROZEN [2010] LiMiTED DVDRip H262 AAC[ ENG SUBS]-MANTESH +: title: FROZEN + year: 2010 + edition: Limited + source: DVD + other: Rip + video_codec: MPEG-2 + audio_codec: AAC + subtitle_language: english + release_group: MANTESH + type: movie + +? Family.Katta.2016.1080p.WEB-DL.H263.DD5.1.ESub-DDR +: title: Family Katta + year: 2016 + screen_size: 1080p + source: Web + video_codec: H.263 + audio_codec: Dolby Digital + audio_channels: '5.1' + subtitle_language: und + release_group: DDR + type: movie + +? Bad Boys 2 1080i.mpg2.rus.eng.ts +: title: Bad Boys 2 + screen_size: 1080i + video_codec: MPEG-2 + language: [russian, english] + container: ts + type: movie + +? Alien.Director.Cut.Ita.Eng.VP9.Opus.AlphaBot.webm +: title: Alien + edition: Director's Cut + language: [english, italian] + video_codec: VP9 + audio_codec: Opus + release_group: AlphaBot + container: webm + type: movie + +? The.Stranger.1946.US.(Kino.Classics).Bluray.1080p.LPCM.DD-2.0.x264-Grym@BTNET +: title: The Stranger + year: 1946 + country: US + source: Blu-ray + screen_size: 1080p + audio_codec: [LPCM, Dolby Digital] + audio_channels: '2.0' + video_codec: H.264 + release_group: Grym@BTNET + type: movie + +? X-Men.Apocalypse.2016.complete.hdts.pcm.TrueFrench-Scarface45.avi +: title: X-Men Apocalypse + year: 2016 + other: Complete + source: HD Telesync + audio_codec: PCM + language: french + release_group: Scarface45 + container: avi + type: movie + +? Tears.of.Steel.2012.2160p.DMRip.Eng.HDCLUB.mkv +: title: Tears of Steel + year: 2012 + screen_size: 2160p + source: Digital Master + other: Rip + language: english + release_group: HDCLUB + container: mkv + type: movie + +? 
"/Movies/Open Season 2 (2008)/Open Season 2 (2008) - Bluray-1080p.x264.DTS.mkv" +: options: --type movie + title: Open Season 2 + year: 2008 + source: Blu-ray + screen_size: 1080p + video_codec: H.264 + audio_codec: DTS + container: mkv + type: movie + +? Re-Animator.1985.INTEGRAL VERSION LIMITED EDITION.1080p.BluRay.REMUX.AVC.DTS-HD MA 5.1-LAZY +: title: Re-Animator + year: 1985 + edition: Limited + screen_size: 1080p + source: Blu-ray + other: Remux + video_codec: H.264 + audio_codec: DTS-HD + audio_profile: Master Audio + audio_channels: '5.1' + release_group: LAZY + type: movie + +? Test (2013) [WEBDL-1080p] [x264 AC3] [ENG+RU+PT] [NTb].mkv +: title: Test + year: 2013 + source: Web + screen_size: 1080p + video_codec: H.264 + audio_codec: Dolby Digital + language: [en, ru, pt] + release_group: NTb + container: mkv + type: movie + +? "[nextorrent.org] Bienvenue.Au.Gondwana.2016.FRENCH.DVDRiP.XViD-AViTECH.avi" +: website: nextorrent.org + title: Bienvenue Au Gondwana + year: 2016 + language: french + source: DVD + other: Rip + video_codec: Xvid + release_group: AViTECH + container: avi + type: movie + +? Star Trek First Contact (1996) Blu-Ray 1080p24 H.264 TrueHD 5.1 CtrlHD +: title: Star Trek First Contact + year: 1996 + source: Blu-ray + screen_size: 1080p + frame_rate: 24fps + video_codec: H.264 + audio_codec: Dolby TrueHD + audio_channels: '5.1' + release_group: CtrlHD + type: movie + +? The.Hobbit.The.Desolation.of.Smaug.Extended.HFR.48fps.ITA.ENG.AC3.BDRip.1080p.x264_ZMachine.mkv +: title: The Hobbit The Desolation of Smaug + edition: Extended + other: [High Frame Rate, Rip] + frame_rate: 48fps + language: [it, en] + audio_codec: Dolby Digital + source: Blu-ray + screen_size: 1080p + video_codec: H.264 + release_group: ZMachine + container: mkv + type: movie + +? 
Test (2013) [WEBDL-1080p] [x264 AC3] [ENG+PT+DE] [STANDARD] +: title: Test + year: 2013 + source: Web + screen_size: 1080p + video_codec: H.264 + audio_codec: Dolby Digital + language: [en, pt, de] + release_group: STANDARD + type: movie + +? Test (2013) [WEBDL-1080p] [x264 AC3] [ENG+DE+IT] [STANDARD] +: title: Test + year: 2013 + source: Web + screen_size: 1080p + video_codec: H.264 + audio_codec: Dolby Digital + language: [en, de, it] + release_group: STANDARD + type: movie + +? Ant-Man.and.the.Wasp.2018.Digital.Extras.1080p.AMZN.WEB-DL.DDP5.1.H.264-NTG.mkv +: title: Ant-Man and the Wasp + year: 2018 + other: Extras + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + release_group: NTG + type: movie + +? Ant-Man.and.the.Wasp.2018.1080p.AMZN.WEB-DL.DDP5.1.H.264-NTG.mkv +: title: Ant-Man and the Wasp + year: 2018 + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + release_group: NTG + type: movie + +? Avengers.Infinity.War.2018.3D.Hybrid.REPACK.1080p.BluRay.REMUX.AVC.Atmos-EPSiLON.mk3d +: title: Avengers Infinity War + year: 2018 + other: + - 3D + - Proper + - Remux + proper_count: 1 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + audio_codec: Dolby Atmos + release_group: EPSiLON + container: mk3d + type: movie + +? Ouija.Seance.The.Final.Game.2018.1080p.WEB-DL.DD5.1.H264-CMRG +: title: Ouija Seance The Final Game + year: 2018 + screen_size: 1080p + source: Web + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: CMRG + type: movie + +? 
The.Girl.in.the.Spiders.Web.2019.1080p.WEB-DL.x264.AC3-EVO.mkv +: title: The Girl in the Spiders Web + year: 2019 + screen_size: 1080p + source: Web + video_codec: H.264 + audio_codec: Dolby Digital + release_group: EVO + container: mkv + type: movie diff --git a/lib/guessit/test/rules/__init__.py b/lib/guessit/test/rules/__init__.py new file mode 100644 index 00000000..e5be370e --- /dev/null +++ b/lib/guessit/test/rules/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name diff --git a/lib/guessit/test/rules/audio_codec.yml b/lib/guessit/test/rules/audio_codec.yml new file mode 100644 index 00000000..9e381c34 --- /dev/null +++ b/lib/guessit/test/rules/audio_codec.yml @@ -0,0 +1,134 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. + + +? +MP3 +? +lame +? +lame3.12 +? +lame3.100 +: audio_codec: MP3 + +? +MP2 +: audio_codec: MP2 + +? +DolbyDigital +? +DD +? +Dolby Digital +? +AC3 +: audio_codec: Dolby Digital + +? +DDP +? +DD+ +? +EAC3 +: audio_codec: Dolby Digital Plus + +? +DolbyAtmos +? +Dolby Atmos +? +Atmos +? -Atmosphere +: audio_codec: Dolby Atmos + +? +AAC +: audio_codec: AAC + +? +Flac +: audio_codec: FLAC + +? +DTS +: audio_codec: DTS + +? +True-HD +? +trueHD +: audio_codec: Dolby TrueHD + +? +True-HD51 +? +trueHD51 +: audio_codec: Dolby TrueHD + audio_channels: '5.1' + +? +DTSHD +? +DTS HD +? +DTS-HD +: audio_codec: DTS-HD + +? +DTS-HDma +? +DTSMA +: audio_codec: DTS-HD + audio_profile: Master Audio + +? +AC3-hq +: audio_codec: Dolby Digital + audio_profile: High Quality + +? +AAC-HE +: audio_codec: AAC + audio_profile: High Efficiency + +? +AAC-LC +: audio_codec: AAC + audio_profile: Low Complexity + +? +AAC2.0 +? +AAC20 +: audio_codec: AAC + audio_channels: '2.0' + +? +7.1 +? +7ch +? +8ch +: audio_channels: '7.1' + +? +5.1 +? +5ch +? 
+6ch +: audio_channels: '5.1' + +? +2ch +? +2.0 +? +stereo +: audio_channels: '2.0' + +? +1ch +? +mono +: audio_channels: '1.0' + +? DD5.1 +? DD51 +: audio_codec: Dolby Digital + audio_channels: '5.1' + +? -51 +: audio_channels: '5.1' + +? DTS-HD.HRA +? DTSHD.HRA +? DTS-HD.HR +? DTSHD.HR +? -HRA +? -HR +: audio_codec: DTS-HD + audio_profile: High Resolution Audio + +? DTSES +? DTS-ES +? -ES +: audio_codec: DTS + audio_profile: Extended Surround + +? DD-EX +? DDEX +? -EX +: audio_codec: Dolby Digital + audio_profile: EX + +? OPUS +: audio_codec: Opus + +? Vorbis +: audio_codec: Vorbis + +? PCM +: audio_codec: PCM + +? LPCM +: audio_codec: LPCM diff --git a/lib/guessit/test/rules/bonus.yml b/lib/guessit/test/rules/bonus.yml new file mode 100644 index 00000000..6ef6f5b2 --- /dev/null +++ b/lib/guessit/test/rules/bonus.yml @@ -0,0 +1,9 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Movie Title-x01-Other Title.mkv +? Movie Title-x01-Other Title +? directory/Movie Title-x01-Other Title/file.mkv +: title: Movie Title + bonus_title: Other Title + bonus: 1 + diff --git a/lib/guessit/test/rules/cds.yml b/lib/guessit/test/rules/cds.yml new file mode 100644 index 00000000..d76186c6 --- /dev/null +++ b/lib/guessit/test/rules/cds.yml @@ -0,0 +1,10 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? cd 1of3 +: cd: 1 + cd_count: 3 + +? Some.Title-DVDRIP-x264-CDP +: cd: !!null + release_group: CDP + video_codec: H.264 diff --git a/lib/guessit/test/rules/common_words.yml b/lib/guessit/test/rules/common_words.yml new file mode 100644 index 00000000..d403a457 --- /dev/null +++ b/lib/guessit/test/rules/common_words.yml @@ -0,0 +1,467 @@ +? is +: title: is + +? it +: title: it + +? am +: title: am + +? mad +: title: mad + +? men +: title: men + +? man +: title: man + +? run +: title: run + +? 
sin +: title: sin + +? st +: title: st + +? to +: title: to + +? 'no' +: title: 'no' + +? non +: title: non + +? war +: title: war + +? min +: title: min + +? new +: title: new + +? car +: title: car + +? day +: title: day + +? bad +: title: bad + +? bat +: title: bat + +? fan +: title: fan + +? fry +: title: fry + +? cop +: title: cop + +? zen +: title: zen + +? gay +: title: gay + +? fat +: title: fat + +? one +: title: one + +? cherokee +: title: cherokee + +? got +: title: got + +? an +: title: an + +? as +: title: as + +? cat +: title: cat + +? her +: title: her + +? be +: title: be + +? hat +: title: hat + +? sun +: title: sun + +? may +: title: may + +? my +: title: my + +? mr +: title: mr + +? rum +: title: rum + +? pi +: title: pi + +? bb +: title: bb + +? bt +: title: bt + +? tv +: title: tv + +? aw +: title: aw + +? by +: title: by + +? md +: other: Mic Dubbed + +? mp +: title: mp + +? cd +: title: cd + +? in +: title: in + +? ad +: title: ad + +? ice +: title: ice + +? ay +: title: ay + +? at +: title: at + +? star +: title: star + +? so +: title: so + +? he +: title: he + +? do +: title: do + +? ax +: title: ax + +? mx +: title: mx + +? bas +: title: bas + +? de +: title: de + +? le +: title: le + +? son +: title: son + +? ne +: title: ne + +? ca +: title: ca + +? ce +: title: ce + +? et +: title: et + +? que +: title: que + +? mal +: title: mal + +? est +: title: est + +? vol +: title: vol + +? or +: title: or + +? mon +: title: mon + +? se +: title: se + +? je +: title: je + +? tu +: title: tu + +? me +: title: me + +? ma +: title: ma + +? va +: title: va + +? au +: country: AU + +? lu +: title: lu + +? wa +: title: wa + +? ga +: title: ga + +? ao +: title: ao + +? la +: title: la + +? el +: title: el + +? del +: title: del + +? por +: title: por + +? mar +: title: mar + +? al +: title: al + +? un +: title: un + +? ind +: title: ind + +? arw +: title: arw + +? ts +: source: Telesync + +? ii +: title: ii + +? bin +: title: bin + +? 
chan +: title: chan + +? ss +: title: ss + +? san +: title: san + +? oss +: title: oss + +? iii +: title: iii + +? vi +: title: vi + +? ben +: title: ben + +? da +: title: da + +? lt +: title: lt + +? ch +: title: ch + +? sr +: title: sr + +? ps +: title: ps + +? cx +: title: cx + +? vo +: title: vo + +? mkv +: container: mkv + +? avi +: container: avi + +? dmd +: title: dmd + +? the +: title: the + +? dis +: title: dis + +? cut +: title: cut + +? stv +: title: stv + +? des +: title: des + +? dia +: title: dia + +? and +: title: and + +? cab +: title: cab + +? sub +: title: sub + +? mia +: title: mia + +? rim +: title: rim + +? las +: title: las + +? une +: title: une + +? par +: title: par + +? srt +: container: srt + +? ano +: title: ano + +? toy +: title: toy + +? job +: title: job + +? gag +: title: gag + +? reel +: title: reel + +? www +: title: www + +? for +: title: for + +? ayu +: title: ayu + +? csi +: title: csi + +? ren +: title: ren + +? moi +: title: moi + +? sur +: title: sur + +? fer +: title: fer + +? fun +: title: fun + +? two +: title: two + +? big +: title: big + +? psy +: title: psy + +? air +: title: air + +? brazil +: title: brazil + +? jordan +: title: jordan + +? bs +: title: bs + +? kz +: title: kz + +? gt +: title: gt + +? im +: title: im + +? pt +: language: pt + +? scr +: title: scr + +? sd +: title: sd + +? hr +: other: High Resolution diff --git a/lib/guessit/test/rules/country.yml b/lib/guessit/test/rules/country.yml new file mode 100644 index 00000000..b3d4d8f1 --- /dev/null +++ b/lib/guessit/test/rules/country.yml @@ -0,0 +1,13 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. +? Us.this.is.title +? this.is.title.US +: country: US + title: this is title + +? This.is.Us +: title: This is Us + +? 
This.Is.Us +: options: --no-default-config + title: This Is Us diff --git a/lib/guessit/test/rules/date.yml b/lib/guessit/test/rules/date.yml new file mode 100644 index 00000000..d7379f03 --- /dev/null +++ b/lib/guessit/test/rules/date.yml @@ -0,0 +1,50 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +09.03.08 +? +09.03.2008 +? +2008.03.09 +: date: 2008-03-09 + +? +31.01.15 +? +31.01.2015 +? +15.01.31 +? +2015.01.31 +: date: 2015-01-31 + +? +01.02.03 +: date: 2003-02-01 + +? +01.02.03 +: options: --date-year-first + date: 2001-02-03 + +? +01.02.03 +: options: --date-day-first + date: 2003-02-01 + +? 1919 +? 2030 +: !!map {} + +? 2029 +: year: 2029 + +? (1920) +: year: 1920 + +? 2012 +: year: 2012 + +? 2011 2013 (2012) (2015) # first marked year is guessed. +: title: "2011 2013" + year: 2012 + +? 2012 2009 S01E02 2015 # If no year is marked, the second one is guessed. +: title: "2012" + year: 2009 + episode_title: "2015" + +? Something 2 mar 2013) +: title: Something + date: 2013-03-02 + type: episode diff --git a/lib/guessit/test/rules/edition.yml b/lib/guessit/test/rules/edition.yml new file mode 100644 index 00000000..4b7fd986 --- /dev/null +++ b/lib/guessit/test/rules/edition.yml @@ -0,0 +1,63 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Director's cut +? Edition Director's cut +: edition: Director's Cut + +? Collector +? Collector Edition +? Edition Collector +: edition: Collector + +? Special Edition +? Edition Special +? -Special +: edition: Special + +? Criterion Edition +? Edition Criterion +? CC +? -Criterion +: edition: Criterion + +? Deluxe +? Deluxe Edition +? Edition Deluxe +: edition: Deluxe + +? Super Movie Alternate XViD +? Super Movie Alternative XViD +? Super Movie Alternate Cut XViD +? Super Movie Alternative Cut XViD +: edition: Alternative Cut + +? 
ddc +: edition: Director's Definitive Cut + +? IMAX +? IMAX Edition +: edition: IMAX + +? ultimate edition +? -ultimate +: edition: Ultimate + +? ultimate collector edition +? ultimate collector's edition +? ultimate collectors edition +? -collectors edition +? -ultimate edition +: edition: [Ultimate, Collector] + +? ultimate collectors edition dc +: edition: [Ultimate, Collector, Director's Cut] + +? fan edit +? fan edition +? fan collection +: edition: Fan + +? ultimate fan edit +? ultimate fan edition +? ultimate fan collection +: edition: [Ultimate, Fan] diff --git a/lib/guessit/test/rules/episodes.yml b/lib/guessit/test/rules/episodes.yml new file mode 100644 index 00000000..44e06a3b --- /dev/null +++ b/lib/guessit/test/rules/episodes.yml @@ -0,0 +1,331 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. +? +2x5 +? +2X5 +? +02x05 +? +2X05 +? +02x5 +? S02E05 +? s02e05 +? s02e5 +? s2e05 +? s02ep05 +? s2EP5 +? -s03e05 +? -s02e06 +? -3x05 +? -2x06 +: season: 2 + episode: 5 + +? "+0102" +? "+102" +: season: 1 + episode: 2 + +? "0102 S03E04" +? "S03E04 102" +: season: 3 + episode: 4 + +? +serie Saison 2 other +? +serie Season 2 other +? +serie Saisons 2 other +? +serie Seasons 2 other +? +serie Season Two other +? +serie Season II other +: season: 2 + +? Some Series.S02E01.Episode.title.mkv +? Some Series/Season 02/E01-Episode title.mkv +? Some Series/Season 02/Some Series-E01-Episode title.mkv +? Some Dummy Directory/Season 02/Some Series-E01-Episode title.mkv +? -Some Dummy Directory/Season 02/E01-Episode title.mkv +? Some Series/Unsafe Season 02/Some Series-E01-Episode title.mkv +? -Some Series/Unsafe Season 02/E01-Episode title.mkv +? Some Series/Season 02/E01-Episode title.mkv +? Some Series/ Season 02/E01-Episode title.mkv +? Some Dummy Directory/Some Series S02/E01-Episode title.mkv +? 
Some Dummy Directory/S02 Some Series/E01-Episode title.mkv +: title: Some Series + episode_title: Episode title + season: 2 + episode: 1 + +? Some Series.S02E01.mkv +? Some Series/Season 02/E01.mkv +? Some Series/Season 02/Some Series-E01.mkv +? Some Dummy Directory/Season 02/Some Series-E01.mkv +? -Some Dummy Directory/Season 02/E01.mkv +? Some Series/Unsafe Season 02/Some Series-E01.mkv +? -Some Series/Unsafe Season 02/E01.mkv +? Some Series/Season 02/E01.mkv +? Some Series/ Season 02/E01.mkv +? Some Dummy Directory/Some Series S02/E01-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA.mkv +: title: Some Series + season: 2 + episode: 1 + +? Some Series S03E01E02 +: title: Some Series + season: 3 + episode: [1, 2] + +? Some Series S01S02S03 +? Some Series S01-02-03 +? Some Series S01 S02 S03 +? Some Series S01 02 03 +: title: Some Series + season: [1, 2, 3] + +? Some Series E01E02E03 +? Some Series E01-02-03 +? Some Series E01-03 +? Some Series E01 E02 E03 +? Some Series E01 02 03 +: title: Some Series + episode: [1, 2, 3] + +? Some Series E01E02E04 +? Some Series E01 E02 E04 +? Some Series E01 02 04 +: title: Some Series + episode: [1, 2, 4] + +? Some Series E01-02-04 +? Some Series E01-04 +? Some Series E01-04 +: title: Some Series + episode: [1, 2, 3, 4] + +? Some Series E01-02-E04 +: title: Some Series + episode: [1, 2, 3, 4] + +? Episode 3 +? -Episode III +: episode: 3 + +? Episode 3 +? Episode III +: options: -t episode + episode: 3 + +? -A very special movie +: episode_details: Special + +? -A very special episode +: options: -t episode + episode_details: Special + +? A very special episode s06 special +: options: -t episode + title: A very special episode + episode_details: Special + +? 12 Monkeys\Season 01\Episode 05\12 Monkeys - S01E05 - The Night Room.mkv +: container: mkv + title: 12 Monkeys + episode: 5 + season: 1 + +? S03E02.X.1080p +: episode: 2 + screen_size: 1080p + season: 3 + +? 
Something 1 x 2-FlexGet +: options: -t episode + title: Something + season: 1 + episode: 2 + episode_title: FlexGet + +? Show.Name.-.Season.1.to.3.-.Mp4.1080p +? Show.Name.-.Season.1~3.-.Mp4.1080p +? Show.Name.-.Saison.1.a.3.-.Mp4.1080p +: container: mp4 + screen_size: 1080p + season: + - 1 + - 2 + - 3 + title: Show Name + +? Show.Name.Season.1.3&5.HDTV.XviD-GoodGroup[SomeTrash] +? Show.Name.Season.1.3 and 5.HDTV.XviD-GoodGroup[SomeTrash] +: source: HDTV + release_group: GoodGroup[SomeTrash] + season: + - 1 + - 3 + - 5 + title: Show Name + type: episode + video_codec: Xvid + +? Show.Name.Season.1.2.3-5.HDTV.XviD-GoodGroup[SomeTrash] +? Show.Name.Season.1.2.3~5.HDTV.XviD-GoodGroup[SomeTrash] +? Show.Name.Season.1.2.3 to 5.HDTV.XviD-GoodGroup[SomeTrash] +: source: HDTV + release_group: GoodGroup[SomeTrash] + season: + - 1 + - 2 + - 3 + - 4 + - 5 + title: Show Name + type: episode + video_codec: Xvid + +? The.Get.Down.S01EP01.FRENCH.720p.WEBRIP.XVID-STR +: episode: 1 + source: Web + other: Rip + language: fr + release_group: STR + screen_size: 720p + season: 1 + title: The Get Down + type: episode + video_codec: Xvid + +? My.Name.Is.Earl.S01E01-S01E21.SWE-SUB +: episode: + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + - 21 + season: 1 + subtitle_language: sv + title: My Name Is Earl + type: episode + +? Show.Name.Season.4.Episodes.1-12 +: episode: + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + season: 4 + title: Show Name + type: episode + +? show name s01.to.s04 +: season: + - 1 + - 2 + - 3 + - 4 + title: show name + type: episode + +? epi +: options: -t episode + title: epi + +? Episode20 +? Episode 20 +: episode: 20 + +? Episode50 +? Episode 50 +: episode: 50 + +? Episode51 +? Episode 51 +: episode: 51 + +? Episode70 +? Episode 70 +: episode: 70 + +? Episode71 +? Episode 71 +: episode: 71 + +? S01D02.3-5-GROUP +: disc: [2, 3, 4, 5] + +? 
S01D02&4-6&8 +: disc: [2, 4, 5, 6, 8] + +? Something.4x05-06 +? Something - 4x05-06 +? Something:4x05-06 +? Something 4x05-06 +? Something-4x05-06 +: title: Something + season: 4 + episode: + - 5 + - 6 + +? Something.4x05-06 +? Something - 4x05-06 +? Something:4x05-06 +? Something 4x05-06 +? Something-4x05-06 +: options: -T something + title: something + season: 4 + episode: + - 5 + - 6 + +? Colony 23/S01E01.Some.title.mkv +: title: Colony 23 + season: 1 + episode: 1 + episode_title: Some title + +? Show.Name.E02.2010.mkv +: options: -t episode + title: Show Name + year: 2010 + episode: 2 + +? Show.Name.E02.S2010.mkv +: options: -t episode + title: Show Name + year: 2010 + season: 2010 + episode: 2 + + +? Show.Name.E02.2010.mkv +: title: Show Name + year: 2010 + episode: 2 + +? Show.Name.E02.S2010.mkv +: title: Show Name + year: 2010 + season: 2010 + episode: 2 diff --git a/lib/guessit/test/rules/film.yml b/lib/guessit/test/rules/film.yml new file mode 100644 index 00000000..1f774331 --- /dev/null +++ b/lib/guessit/test/rules/film.yml @@ -0,0 +1,9 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Film Title-f01-Series Title.mkv +? Film Title-f01-Series Title +? directory/Film Title-f01-Series Title/file.mkv +: title: Series Title + film_title: Film Title + film: 1 + diff --git a/lib/guessit/test/rules/language.yml b/lib/guessit/test/rules/language.yml new file mode 100644 index 00000000..10e5b9c0 --- /dev/null +++ b/lib/guessit/test/rules/language.yml @@ -0,0 +1,47 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +English +? .ENG. +: language: English + +? +French +: language: French + +? +SubFrench +? +SubFr +? +STFr +? ST.FR +: subtitle_language: French + +? +ENG.-.sub.FR +? ENG.-.FR Sub +? +ENG.-.SubFR +? +ENG.-.FRSUB +? +ENG.-.FRSUBS +? 
+ENG.-.FR-SUBS +: language: English + subtitle_language: French + +? "{Fr-Eng}.St{Fr-Eng}" +? "Le.Prestige[x264.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv" +: language: [French, English] + subtitle_language: [French, English] + +? +ENG.-.sub.SWE +? ENG.-.SWE Sub +? +ENG.-.SubSWE +? +ENG.-.SWESUB +? +ENG.-.sub.SV +? ENG.-.SV Sub +? +ENG.-.SubSV +? +ENG.-.SVSUB +: language: English + subtitle_language: Swedish + +? The English Patient (1996) +: title: The English Patient + -language: english + +? French.Kiss.1995.1080p +: title: French Kiss + -language: french diff --git a/lib/guessit/test/rules/other.yml b/lib/guessit/test/rules/other.yml new file mode 100644 index 00000000..447f1787 --- /dev/null +++ b/lib/guessit/test/rules/other.yml @@ -0,0 +1,169 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +DVDSCR +? +DVDScreener +? +DVD-SCR +? +DVD Screener +? +DVD AnythingElse Screener +? -DVD AnythingElse SCR +: other: Screener + +? +AudioFix +? +AudioFixed +? +Audio Fix +? +Audio Fixed +: other: Audio Fixed + +? +SyncFix +? +SyncFixed +? +Sync Fix +? +Sync Fixed +: other: Sync Fixed + +? +DualAudio +? +Dual Audio +: other: Dual Audio + +? +ws +? +WideScreen +? +Wide Screen +: other: Widescreen + +# Fix must be surround by others properties to be matched. +? DVD.fix.XViD +? -DVD.Fix +? -Fix.XViD +: other: Fix + -proper_count: 1 + +? -DVD.BlablaBla.Fix.Blablabla.XVID +? -DVD.BlablaBla.Fix.XVID +? -DVD.Fix.Blablabla.XVID +: other: Fix + -proper_count: 1 + + +? DVD.Real.PROPER.REPACK +: other: Proper + proper_count: 3 + + +? Proper.720p +? +Repack +? +Rerip +: other: Proper + proper_count: 1 + +? XViD.Fansub +: other: Fan Subtitled + +? XViD.Fastsub +: other: Fast Subtitled + +? +Season Complete +? -Complete +: other: Complete + +? R5 +: other: Region 5 + +? RC +: other: Region C + +? PreAir +? Pre Air +: other: Preair + +? Screener +: other: Screener + +? Remux +: other: Remux + +? 
3D.2019 +: other: 3D + +? HD +: other: HD + +? FHD +? FullHD +? Full HD +: other: Full HD + +? UHD +? Ultra +? UltraHD +? Ultra HD +: other: Ultra HD + +? mHD # ?? +? HDLight +: other: Micro HD + +? HQ +: other: High Quality + +? hr +: other: High Resolution + +? PAL +: other: PAL + +? SECAM +: other: SECAM + +? NTSC +: other: NTSC + +? LDTV +: other: Low Definition + +? LD +: other: Line Dubbed + +? MD +: other: Mic Dubbed + +? -The complete movie +: other: Complete + +? +The complete movie +: title: The complete movie + +? +AC3-HQ +: audio_profile: High Quality + +? Other-HQ +: other: High Quality + +? reenc +? re-enc +? re-encoded +? reencoded +: other: Reencoded + +? CONVERT XViD +: other: Converted + +? +HDRIP # it's a Rip from non specified HD source +: other: [HD, Rip] + +? SDR +: other: Standard Dynamic Range + +? HDR +? HDR10 +? -HDR100 +: other: HDR10 + +? BT2020 +? BT.2020 +? -BT.20200 +? -BT.2021 +: other: BT.2020 + +? Upscaled +? Upscale +: other: Upscaled + diff --git a/lib/guessit/test/rules/part.yml b/lib/guessit/test/rules/part.yml new file mode 100644 index 00000000..72f3d98a --- /dev/null +++ b/lib/guessit/test/rules/part.yml @@ -0,0 +1,18 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Filename Part 3.mkv +? Filename Part III.mkv +? Filename Part Three.mkv +? Filename Part Trois.mkv +: title: Filename + part: 3 + +? Part 3 +? Part III +? Part Three +? Part Trois +? Part3 +: part: 3 + +? -Something.Apt.1 +: part: 1 \ No newline at end of file diff --git a/lib/guessit/test/rules/processors.yml b/lib/guessit/test/rules/processors.yml new file mode 100644 index 00000000..ee906b2c --- /dev/null +++ b/lib/guessit/test/rules/processors.yml @@ -0,0 +1,8 @@ +# Multiple input strings having same expected results can be chained. +# Use $ marker to check inputs that should not match results. + +# Prefer information for last path. +? 
Some movie (2000)/Some movie (2001).mkv +? Some movie (2001)/Some movie.mkv +: year: 2001 + container: mkv diff --git a/lib/guessit/test/rules/processors_test.py b/lib/guessit/test/rules/processors_test.py new file mode 100644 index 00000000..c22e968c --- /dev/null +++ b/lib/guessit/test/rules/processors_test.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement + +from rebulk.match import Matches, Match + +from ...rules.processors import StripSeparators + + +def test_strip_separators(): + strip_separators = StripSeparators() + + matches = Matches() + + m = Match(3, 11, input_string="pre.ABCDEF.post") + + assert m.raw == '.ABCDEF.' + matches.append(m) + + returned_matches = strip_separators.when(matches, None) + assert returned_matches == matches + + strip_separators.then(matches, returned_matches, None) + + assert m.raw == 'ABCDEF' + + +def test_strip_separators_keep_acronyms(): + strip_separators = StripSeparators() + + matches = Matches() + + m = Match(0, 13, input_string=".S.H.I.E.L.D.") + m2 = Match(0, 22, input_string=".Agent.Of.S.H.I.E.L.D.") + + assert m.raw == '.S.H.I.E.L.D.' + matches.append(m) + matches.append(m2) + + returned_matches = strip_separators.when(matches, None) + assert returned_matches == matches + + strip_separators.then(matches, returned_matches, None) + + assert m.raw == '.S.H.I.E.L.D.' + assert m2.raw == 'Agent.Of.S.H.I.E.L.D.' diff --git a/lib/guessit/test/rules/release_group.yml b/lib/guessit/test/rules/release_group.yml new file mode 100644 index 00000000..c96383e9 --- /dev/null +++ b/lib/guessit/test/rules/release_group.yml @@ -0,0 +1,71 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Some.Title.XViD-ReleaseGroup +? Some.Title.XViD-ReleaseGroup.mkv +: release_group: ReleaseGroup + +? 
Some.Title.XViD-by.Artik[SEDG].avi +: release_group: Artik[SEDG] + +? "[ABC] Some.Title.avi" +? some/folder/[ABC]Some.Title.avi +: release_group: ABC + +? "[ABC] Some.Title.XViD-GRP.avi" +? some/folder/[ABC]Some.Title.XViD-GRP.avi +: release_group: GRP + +? "[ABC] Some.Title.S01E02.avi" +? some/folder/[ABC]Some.Title.S01E02.avi +: release_group: ABC + +? Some.Title.XViD-S2E02.NoReleaseGroup.avi +: release_group: !!null + +? Test.S01E01-FooBar-Group +: options: -G group -G xxxx + episode: 1 + episode_title: FooBar + release_group: Group + season: 1 + title: Test + type: episode + +? Test.S01E01-FooBar-Group +: options: -G re:gr.?up -G xxxx + episode: 1 + episode_title: FooBar + release_group: Group + season: 1 + title: Test + type: episode + +? Show.Name.x264-byEMP +: title: Show Name + video_codec: H.264 + release_group: byEMP + +? Show.Name.x264-NovaRip +: title: Show Name + video_codec: H.264 + release_group: NovaRip + +? Show.Name.x264-PARTiCLE +: title: Show Name + video_codec: H.264 + release_group: PARTiCLE + +? Show.Name.x264-POURMOi +: title: Show Name + video_codec: H.264 + release_group: POURMOi + +? Show.Name.x264-RipPourBox +: title: Show Name + video_codec: H.264 + release_group: RipPourBox + +? Show.Name.x264-RiPRG +: title: Show Name + video_codec: H.264 + release_group: RiPRG diff --git a/lib/guessit/test/rules/screen_size.yml b/lib/guessit/test/rules/screen_size.yml new file mode 100644 index 00000000..25d8374f --- /dev/null +++ b/lib/guessit/test/rules/screen_size.yml @@ -0,0 +1,280 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +360p +? +360px +? -360 +? +500x360 +? -250x360 +: screen_size: 360p + +? +640x360 +? -640x360i +? -684x360i +: screen_size: 360p + aspect_ratio: 1.778 + +? +360i +: screen_size: 360i + +? +480x360i +? -480x360p +? -450x360 +: screen_size: 360i + aspect_ratio: 1.333 + +? +368p +? +368px +? -368i +? -368 +? 
+500x368 +: screen_size: 368p + +? -490x368 +? -700x368 +: screen_size: 368p + +? +492x368p +: screen_size: + aspect_ratio: 1.337 + +? +654x368 +: screen_size: 368p + aspect_ratio: 1.777 + +? +698x368 +: screen_size: 368p + aspect_ratio: 1.897 + +? +368i +: -screen_size: 368i + +? +480p +? +480px +? -480i +? -480 +? -500x480 +? -638x480 +? -920x480 +: screen_size: 480p + +? +640x480 +: screen_size: 480p + aspect_ratio: 1.333 + +? +852x480 +: screen_size: 480p + aspect_ratio: 1.775 + +? +910x480 +: screen_size: 480p + aspect_ratio: 1.896 + +? +500x480 +? +500 x 480 +? +500 * 480 +? +500x480p +? +500X480i +: screen_size: 500x480 + aspect_ratio: 1.042 + +? +480i +? +852x480i +: screen_size: 480i + +? +576p +? +576px +? -576i +? -576 +? -500x576 +? -766x576 +? -1094x576 +: screen_size: 576p + +? +768x576 +: screen_size: 576p + aspect_ratio: 1.333 + +? +1024x576 +: screen_size: 576p + aspect_ratio: 1.778 + +? +1092x576 +: screen_size: 576p + aspect_ratio: 1.896 + +? +500x576 +: screen_size: 500x576 + aspect_ratio: 0.868 + +? +576i +: screen_size: 576i + +? +720p +? +720px +? -720i +? 720hd +? 720pHD +? -720 +? -500x720 +? -950x720 +? -1368x720 +: screen_size: 720p + +? +960x720 +: screen_size: 720p + aspect_ratio: 1.333 + +? +1280x720 +: screen_size: 720p + aspect_ratio: 1.778 + +? +1366x720 +: screen_size: 720p + aspect_ratio: 1.897 + +? +500x720 +: screen_size: 500x720 + aspect_ratio: 0.694 + +? +900p +? +900px +? -900i +? -900 +? -500x900 +? -1198x900 +? -1710x900 +: screen_size: 900p + +? +1200x900 +: screen_size: 900p + aspect_ratio: 1.333 + +? +1600x900 +: screen_size: 900p + aspect_ratio: 1.778 + +? +1708x900 +: screen_size: 900p + aspect_ratio: 1.898 + +? +500x900 +? +500x900p +? +500x900i +: screen_size: 500x900 + aspect_ratio: 0.556 + +? +900i +: screen_size: 900i + +? +1080p +? +1080px +? +1080hd +? +1080pHD +? -1080i +? -1080 +? -500x1080 +? -1438x1080 +? -2050x1080 +: screen_size: 1080p + +? +1440x1080 +: screen_size: 1080p + aspect_ratio: 1.333 + +? 
+1920x1080 +: screen_size: 1080p + aspect_ratio: 1.778 + +? +2048x1080 +: screen_size: 1080p + aspect_ratio: 1.896 + +? +1080i +? -1080p +: screen_size: 1080i + +? 1440p +: screen_size: 1440p + +? +500x1080 +: screen_size: 500x1080 + aspect_ratio: 0.463 + +? +2160p +? +2160px +? -2160i +? -2160 +? +4096x2160 +? +4k +? -2878x2160 +? -4100x2160 +: screen_size: 2160p + +? +2880x2160 +: screen_size: 2160p + aspect_ratio: 1.333 + +? +3840x2160 +: screen_size: 2160p + aspect_ratio: 1.778 + +? +4098x2160 +: screen_size: 2160p + aspect_ratio: 1.897 + +? +500x2160 +: screen_size: 500x2160 + aspect_ratio: 0.231 + +? +4320p +? +4320px +? -4320i +? -4320 +? -5758x2160 +? -8198x2160 +: screen_size: 4320p + +? +5760x4320 +: screen_size: 4320p + aspect_ratio: 1.333 + +? +7680x4320 +: screen_size: 4320p + aspect_ratio: 1.778 + +? +8196x4320 +: screen_size: 4320p + aspect_ratio: 1.897 + +? +500x4320 +: screen_size: 500x4320 + aspect_ratio: 0.116 + +? Test.File.720hd.bluray +? Test.File.720p24 +? Test.File.720p30 +? Test.File.720p50 +? Test.File.720p60 +? Test.File.720p120 +: screen_size: 720p + +? Test.File.400p +: options: + advanced_config: + screen_size: + progressive: ["400"] + screen_size: 400p + +? Test.File2.400p +: options: + advanced_config: + screen_size: + progressive: ["400"] + screen_size: 400p + +? Test.File.720p +: options: + advanced_config: + screen_size: + progressive: ["400"] + screen_size: 720p diff --git a/lib/guessit/test/rules/size.yml b/lib/guessit/test/rules/size.yml new file mode 100644 index 00000000..18b3cd49 --- /dev/null +++ b/lib/guessit/test/rules/size.yml @@ -0,0 +1,8 @@ +? 1.1tb +: size: 1.1TB + +? 123mb +: size: 123MB + +? 4.3gb +: size: 4.3GB diff --git a/lib/guessit/test/rules/source.yml b/lib/guessit/test/rules/source.yml new file mode 100644 index 00000000..cda8f1ac --- /dev/null +++ b/lib/guessit/test/rules/source.yml @@ -0,0 +1,323 @@ +# Multiple input strings having same expected results can be chained. 
+# Use - marker to check inputs that should not match results. +? +VHS +? -VHSAnythingElse +? -SomeVHS stuff +? -VH +? -VHx +: source: VHS + -other: Rip + +? +VHSRip +? +VHS-Rip +? +VhS_rip +? +VHS.RIP +? -VHS +? -VHxRip +: source: VHS + other: Rip + +? +Cam +: source: Camera + -other: Rip + +? +CamRip +? +CaM Rip +? +Cam_Rip +? +cam.rip +? -Cam +: source: Camera + other: Rip + +? +HDCam +? +HD-Cam +: source: HD Camera + -other: Rip + +? +HDCamRip +? +HD-Cam.rip +? -HDCam +? -HD-Cam +: source: HD Camera + other: Rip + +? +Telesync +? +TS +: source: Telesync + -other: Rip + +? +TelesyncRip +? +TSRip +? -Telesync +? -TS +: source: Telesync + other: Rip + +? +HD TS +? -Hd.Ts # ts file extension +? -HD.TS # ts file extension +? +Hd-Ts +: source: HD Telesync + -other: Rip + +? +HD TS Rip +? +Hd-Ts-Rip +? -HD TS +? -Hd-Ts +: source: HD Telesync + other: Rip + +? +Workprint +? +workPrint +? +WorkPrint +? +WP +? -Work Print +: source: Workprint + -other: Rip + +? +Telecine +? +teleCine +? +TC +? -Tele Cine +: source: Telecine + -other: Rip + +? +Telecine Rip +? +teleCine-Rip +? +TC-Rip +? -Telecine +? -TC +: source: Telecine + other: Rip + +? +HD-TELECINE +? +HDTC +: source: HD Telecine + -other: Rip + +? +HD-TCRip +? +HD TELECINE RIP +? -HD-TELECINE +? -HDTC +: source: HD Telecine + other: Rip + +? +PPV +: source: Pay-per-view + -other: Rip + +? +ppv-rip +? -PPV +: source: Pay-per-view + other: Rip + +? -TV +? +SDTV +? +TV-Dub +: source: TV + -other: Rip + +? +SDTVRIP +? +Rip sd tv +? +TvRip +? +Rip TV +? -TV +? -SDTV +: source: TV + other: Rip + +? +DVB +? +pdTV +? +Pd Tv +: source: Digital TV + -other: Rip + +? +DVB-Rip +? +DvBRiP +? +pdtvRiP +? +pd tv RiP +? -DVB +? -pdTV +? -Pd Tv +: source: Digital TV + other: Rip + +? +DVD +? +video ts +? +DVDR +? +DVD 9 +? +dvd 5 +? -dvd ts +: source: DVD + -source: Telesync + -other: Rip + +? +DVD-RIP +? -video ts +? -DVD +? -DVDR +? -DVD 9 +? -dvd 5 +: source: DVD + other: Rip + +? +HDTV +: source: HDTV + -other: Rip + +? 
+tv rip hd +? +HDtv Rip +? -HdRip # it's a Rip from non specified HD source +? -HDTV +: source: HDTV + other: Rip + +? +VOD +: source: Video on Demand + -other: Rip + +? +VodRip +? +vod rip +? -VOD +: source: Video on Demand + other: Rip + +? +webrip +? +Web Rip +? +webdlrip +? +web dl rip +? +webcap +? +web cap +? +webcaprip +? +web cap rip +: source: Web + other: Rip + +? +webdl +? +Web DL +? +webHD +? +WEB hd +? +web +: source: Web + -other: Rip + +? +HDDVD +? +hd dvd +: source: HD-DVD + -other: Rip + +? +hdDvdRip +? -HDDVD +? -hd dvd +: source: HD-DVD + other: Rip + +? +BluRay +? +BD +? +BD5 +? +BD9 +? +BD25 +? +bd50 +: source: Blu-ray + -other: Rip + +? +BR-Scr +? +BR.Screener +: source: Blu-ray + other: [Reencoded, Screener] + -language: pt-BR + +? +BR-Rip +? +BRRip +: source: Blu-ray + other: [Reencoded, Rip] + -language: pt-BR + +? +BluRay rip +? +BDRip +? -BluRay +? -BD +? -BR +? -BR rip +? -BD5 +? -BD9 +? -BD25 +? -bd50 +: source: Blu-ray + other: Rip + +? XVID.NTSC.DVDR.nfo +: source: DVD + -other: Rip + +? +AHDTV +: source: Analog HDTV + -other: Rip + +? +dsr +? +dth +: source: Satellite + -other: Rip + +? +dsrip +? +ds rip +? +dsrrip +? +dsr rip +? +satrip +? +sat rip +? +dthrip +? +dth rip +? -dsr +? -dth +: source: Satellite + other: Rip + +? +UHDTV +: source: Ultra HDTV + -other: Rip + +? +UHDRip +? +UHDTV Rip +? -UHDTV +: source: Ultra HDTV + other: Rip + +? UHD Bluray +? UHD 2160p Bluray +? UHD 8bit Bluray +? UHD HQ 8bit Bluray +? Ultra Bluray +? Ultra HD Bluray +? Bluray ULTRA +? Bluray Ultra HD +? Bluray UHD +? 4K Bluray +? 2160p Bluray +? UHD 10bit HDR Bluray +? UHD HDR10 Bluray +? -HD Bluray +? -AMERICAN ULTRA (2015) 1080p Bluray +? -American.Ultra.2015.BRRip +? -BRRip XviD AC3-ULTRAS +? -UHD Proper Bluray +: source: Ultra HD Blu-ray + +? UHD.BRRip +? UHD.2160p.BRRip +? BRRip.2160p.UHD +? BRRip.[4K-2160p-UHD] +: source: Ultra HD Blu-ray + other: [Reencoded, Rip] + +? UHD.2160p.BDRip +? 
BDRip.[4K-2160p-UHD] +: source: Ultra HD Blu-ray + other: Rip + +? DM +: source: Digital Master + +? DMRIP +? DM-RIP +: source: Digital Master + other: Rip diff --git a/lib/guessit/test/rules/title.yml b/lib/guessit/test/rules/title.yml new file mode 100644 index 00000000..05c7f208 --- /dev/null +++ b/lib/guessit/test/rules/title.yml @@ -0,0 +1,43 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? Title Only +? -Title XViD 720p Only +? sub/folder/Title Only +? -sub/folder/Title XViD 720p Only +? Title Only.mkv +? Title Only.avi +: title: Title Only + +? Title Only/title_only.mkv +: title: Title Only + +? title_only.mkv +: title: title only + +? Some Title/some.title.mkv +? some.title/Some.Title.mkv +: title: Some Title + +? SOME TITLE/Some.title.mkv +? Some.title/SOME TITLE.mkv +: title: Some title + +? some title/Some.title.mkv +? Some.title/some title.mkv +: title: Some title + +? Some other title/Some.Other.title.mkv +? Some.Other title/Some other title.mkv +: title: Some Other title + +? This T.I.T.L.E. has dots +? This.T.I.T.L.E..has.dots +: title: This T.I.T.L.E has dots + +? This.T.I.T.L.E..has.dots.S01E02.This E.P.T.I.T.L.E.has.dots +: title: This T.I.T.L.E has dots + season: 1 + episode: 2 + episode_title: This E.P.T.I.T.L.E has dots + type: episode + diff --git a/lib/guessit/test/rules/video_codec.yml b/lib/guessit/test/rules/video_codec.yml new file mode 100644 index 00000000..ae43bc43 --- /dev/null +++ b/lib/guessit/test/rules/video_codec.yml @@ -0,0 +1,98 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? rv10 +? rv13 +? RV20 +? Rv30 +? rv40 +? -xrv40 +: video_codec: RealVideo + +? mpeg2 +? MPEG2 +? MPEG-2 +? mpg2 +? H262 +? H.262 +? x262 +? -mpeg +? -xmpeg2 +? -mpeg2x +: video_codec: MPEG-2 + +? DivX +? -div X +? divx +? dvdivx +? DVDivX +: video_codec: DivX + +? XviD +? 
xvid +? -x vid +: video_codec: Xvid + +? h263 +? x263 +? h.263 +: video_codec: H.263 + +? h264 +? x264 +? h.264 +? x.264 +? AVC +? AVCHD +? -MPEG-4 +? -mpeg4 +? -mpeg +? -h 265 +? -x265 +: video_codec: H.264 + +? h265 +? x265 +? h.265 +? x.265 +? hevc +? -h 264 +? -x264 +: video_codec: H.265 + +? hevc10 +? HEVC-YUV420P10 +: video_codec: H.265 + color_depth: 10-bit + +? h265-HP +: video_codec: H.265 + video_profile: High + +? H.264-SC +: video_codec: H.264 + video_profile: Scalable Video Coding + +? mpeg4-AVC +: video_codec: H.264 + video_profile: Advanced Video Codec High Definition + +? AVCHD-SC +? H.264-AVCHD-SC +: video_codec: H.264 + video_profile: + - Scalable Video Coding + - Advanced Video Codec High Definition + +? VC1 +? VC-1 +: video_codec: VC-1 + +? VP7 +: video_codec: VP7 + +? VP8 +? VP80 +: video_codec: VP8 + +? VP9 +: video_codec: VP9 diff --git a/lib/guessit/test/rules/website.yml b/lib/guessit/test/rules/website.yml new file mode 100644 index 00000000..11d434d2 --- /dev/null +++ b/lib/guessit/test/rules/website.yml @@ -0,0 +1,23 @@ +# Multiple input strings having same expected results can be chained. +# Use - marker to check inputs that should not match results. +? +tvu.org.ru +? -tvu.unsafe.ru +: website: tvu.org.ru + +? +www.nimp.na +? -somewww.nimp.na +? -www.nimp.nawouak +? -nimp.na +: website: www.nimp.na + +? +wawa.co.uk +? -wawa.uk +: website: wawa.co.uk + +? -Dark.Net.S01E06.720p.HDTV.x264-BATV + -Dark.Net.2015.720p.HDTV.x264-BATV +: website: Dark.Net + +? Dark.Net.S01E06.720p.HDTV.x264-BATV + Dark.Net.2015.720p.HDTV.x264-BATV +: title: Dark Net diff --git a/lib/guessit/test/streaming_services.yaml b/lib/guessit/test/streaming_services.yaml new file mode 100644 index 00000000..adf52e71 --- /dev/null +++ b/lib/guessit/test/streaming_services.yaml @@ -0,0 +1,1934 @@ +? House.of.Cards.2013.S02E03.1080p.NF.WEBRip.DD5.1.x264-NTb.mkv +? 
House.of.Cards.2013.S02E03.1080p.Netflix.WEBRip.DD5.1.x264-NTb.mkv +: title: House of Cards + year: 2013 + season: 2 + episode: 3 + screen_size: 1080p + streaming_service: Netflix + source: Web + other: Rip + audio_channels: "5.1" + audio_codec: Dolby Digital + video_codec: H.264 + release_group: NTb + +? The.Daily.Show.2015.07.01.Kirsten.Gillibrand.Extended.720p.CC.WEBRip.AAC2.0.x264-BTW.mkv +? The.Daily.Show.2015.07.01.Kirsten.Gillibrand.Extended.720p.ComedyCentral.WEBRip.AAC2.0.x264-BTW.mkv +? The.Daily.Show.2015.07.01.Kirsten.Gillibrand.Extended.720p.Comedy.Central.WEBRip.AAC2.0.x264-BTW.mkv +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-07-01 + edition: Extended + source: Web + other: Rip + release_group: BTW + screen_size: 720p + streaming_service: Comedy Central + title: The Daily Show + episode_title: Kirsten Gillibrand + video_codec: H.264 + +? The.Daily.Show.2015.07.01.Kirsten.Gillibrand.Extended.Interview.720p.CC.WEBRip.AAC2.0.x264-BTW.mkv +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-07-01 + source: Web + release_group: BTW + screen_size: 720p + streaming_service: Comedy Central + title: The Daily Show + episode_title: Kirsten Gillibrand Extended Interview + video_codec: H.264 + +? The.Daily.Show.2015.07.02.Sarah.Vowell.CC.WEBRip.AAC2.0.x264-BTW.mkv +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-07-02 + source: Web + release_group: BTW + streaming_service: Comedy Central + title: The Daily Show + episode_title: Sarah Vowell + video_codec: H.264 + +# Streaming service: Amazon +? Show.Name.S07E04.Service.1080p.AMZN.WEBRip.DD+5.1.x264 +? Show.Name.S07E04.Service.1080p.AmazonPrime.WEBRip.DD+5.1.x264 +: title: Show Name + season: 7 + episode: 4 + episode_title: Service + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + other: Rip + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + type: episode + +# Streaming service: Comedy Central +? 
Show.Name.2016.09.28.Nice.Title.Extended.1080p.CC.WEBRip.AAC2.0.x264-monkee +: title: Show Name + date: 2016-09-28 + episode_title: Nice Title + edition: Extended + other: Rip + screen_size: 1080p + streaming_service: Comedy Central + source: Web + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: The CW +? Show.Name.US.S12E20.Nice.Title.720p.CW.WEBRip.AAC2.0.x264-monkee +? Show.Name.US.S12E20.Nice.Title.720p.TheCW.WEBRip.AAC2.0.x264-monkee +: title: Show Name + country: US + season: 12 + episode: 20 + episode_title: Nice Title + screen_size: 720p + streaming_service: The CW + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: AMBC +? Show.Name.2016.09.27.Nice.Title.720p.AMBC.WEBRip.AAC2.0.x264-monkee +: title: Show Name + date: 2016-09-27 + episode_title: Nice Title + screen_size: 720p + streaming_service: ABC + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: HIST +? Show.Name.720p.HIST.WEBRip.AAC2.0.H.264-monkee +? Show.Name.720p.History.WEBRip.AAC2.0.H.264-monkee +: options: -t episode + title: Show Name + screen_size: 720p + streaming_service: History + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: PBS +? Show.Name.2015.Nice.Title.1080p.PBS.WEBRip.AAC2.0.H264-monkee +: options: -t episode + title: Show Name + year: 2015 + episode_title: Nice Title + screen_size: 1080p + streaming_service: PBS + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: SeeSo +? 
Show.Name.2016.Nice.Title.1080p.SESO.WEBRip.AAC2.0.x264-monkee +: options: -t episode + title: Show Name + year: 2016 + episode_title: Nice Title + screen_size: 1080p + streaming_service: SeeSo + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: Discovery +? Show.Name.S01E03.Nice.Title.720p.DISC.WEBRip.AAC2.0.x264-NTb +? Show.Name.S01E03.Nice.Title.720p.Discovery.WEBRip.AAC2.0.x264-NTb +: title: Show Name + season: 1 + episode: 3 + episode_title: Nice Title + screen_size: 720p + streaming_service: Discovery + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: NTb + type: episode + +# Streaming service: BBC iPlayer +? Show.Name.2016.08.18.Nice.Title.720p.iP.WEBRip.AAC2.0.H.264-monkee +? Show.Name.2016.08.18.Nice.Title.720p.BBCiPlayer.WEBRip.AAC2.0.H.264-monkee +: title: Show Name + date: 2016-08-18 + episode_title: Nice Title + streaming_service: BBC iPlayer + screen_size: 720p + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: A&E +? Show.Name.S15E18.Nice.Title.720p.AE.WEBRip.AAC2.0.H.264-monkee +? Show.Name.S15E18.Nice.Title.720p.A&E.WEBRip.AAC2.0.H.264-monkee +: title: Show Name + season: 15 + episode: 18 + episode_title: Nice Title + screen_size: 720p + streaming_service: A&E + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: Adult Swim +? Show.Name.S04E01.Nice.Title.1080p.AS.WEBRip.AAC2.0.H.264-monkee +? 
Show.Name.S04E01.Nice.Title.1080p.AdultSwim.WEBRip.AAC2.0.H.264-monkee +: title: Show Name + season: 4 + episode: 1 + episode_title: Nice Title + screen_size: 1080p + streaming_service: Adult Swim + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: Netflix +? Show.Name.2013.S02E03.1080p.NF.WEBRip.DD5.1.x264-NTb.mkv +: title: Show Name + year: 2013 + season: 2 + episode: 3 + screen_size: 1080p + streaming_service: Netflix + source: Web + other: Rip + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: NTb + container: mkv + type: episode + +# Streaming service: CBS +? Show.Name.2016.05.10.Nice.Title.720p.CBS.WEBRip.AAC2.0.x264-monkee +: title: Show Name + date: 2016-05-10 + episode_title: Nice Title + screen_size: 720p + streaming_service: CBS + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: NBA TV +? NBA.2016.02.27.Team.A.vs.Team.B.720p.NBA.WEBRip.AAC2.0.H.264-monkee +? NBA.2016.02.27.Team.A.vs.Team.B.720p.NBATV.WEBRip.AAC2.0.H.264-monkee +: title: NBA + date: 2016-02-27 + episode_title: Team A vs Team B + screen_size: 720p + streaming_service: NBA TV + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: ePix +? Show.Name.S05E04.Nice.Title.Part4.720p.EPIX.WEBRip.AAC2.0.H.264-monkee +? Show.Name.S05E04.Nice.Title.Part4.720p.ePix.WEBRip.AAC2.0.H.264-monkee +: title: Show Name + season: 5 + episode: 4 + episode_title: Nice Title + part: 4 + screen_size: 720p + streaming_service: ePix + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: NBC +? 
Show.Name.S41E03.Nice.Title.720p.NBC.WEBRip.AAC2.0.x264-monkee +: title: Show Name + season: 41 + episode: 3 + episode_title: Nice Title + screen_size: 720p + streaming_service: NBC + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: Syfy +? Show.Name.S01E02.Nice.Title.720p.SYFY.WEBRip.AAC2.0.x264-group +? Show.Name.S01E02.Nice.Title.720p.Syfy.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: Syfy + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: Spike TV +? Show.Name.S01E02.Nice.Title.720p.SPKE.WEBRip.AAC2.0.x264-group +? Show.Name.S01E02.Nice.Title.720p.Spike TV.WEBRip.AAC2.0.x264-group +? Show.Name.S01E02.Nice.Title.720p.SpikeTV.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: Spike TV + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: IFC +? Show.Name.S01E02.Nice.Title.720p.IFC.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: IFC + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: NATG +? Show.Name.S01E02.Nice.Title.720p.NATG.WEBRip.AAC2.0.x264-group +? 
Show.Name.S01E02.Nice.Title.720p.NationalGeographic.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: National Geographic + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: NFL +? Show.Name.S01E02.Nice.Title.720p.NFL.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: NFL + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: UFC +? Show.Name.S01E02.Nice.Title.720p.UFC.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: UFC + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: TV Land +? Show.Name.S01E02.Nice.Title.720p.TVL.WEBRip.AAC2.0.x264-group +? Show.Name.S01E02.Nice.Title.720p.TVLand.WEBRip.AAC2.0.x264-group +? Show.Name.S01E02.Nice.Title.720p.TV Land.WEBRip.AAC2.0.x264-group +: title: Show Name + season: 1 + episode: 2 + episode_title: Nice Title + screen_size: 720p + streaming_service: TV Land + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: group + type: episode + +# Streaming service: Crunchy Roll +? Show.Name.S01.1080p.CR.WEBRip.AAC.2.0.x264-monkee +: title: Show Name + season: 1 + screen_size: 1080p + streaming_service: Crunchy Roll + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: Disney +? Show.Name.S01.1080p.DSNY.WEBRip.AAC.2.0.x264-monkee +? 
Show.Name.S01.1080p.Disney.WEBRip.AAC.2.0.x264-monkee +: title: Show Name + season: 1 + screen_size: 1080p + streaming_service: Disney + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: Nickelodeon +? Show.Name.S01.1080p.NICK.WEBRip.AAC.2.0.x264-monkee +? Show.Name.S01.1080p.Nickelodeon.WEBRip.AAC.2.0.x264-monkee +: title: Show Name + season: 1 + screen_size: 1080p + streaming_service: Nickelodeon + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: TFou +? Show.Name.S01.1080p.TFOU.WEBRip.AAC.2.0.x264-monkee +? Show.Name.S01.1080p.TFou.WEBRip.AAC.2.0.x264-monkee +: title: Show Name + season: 1 + screen_size: 1080p + streaming_service: TFou + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: monkee + type: episode + +# Streaming service: DIY Network +? Show.Name.S01.720p.DIY.WEBRip.AAC2.0.H.264-BTN +: title: Show Name + season: 1 + screen_size: 720p + streaming_service: DIY Network + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTN + type: episode + +# Streaming service: USA Network +? Show.Name.S01E02.Exfil.1080p.USAN.WEBRip.AAC2.0.x264-AJP69 +: title: Show Name + season: 1 + episode: 2 + screen_size: 1080p + streaming_service: USA Network + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: AJP69 + type: episode + +# Streaming service: TV3 Ireland +? Show.Name.S01E08.576p.TV3.WEBRip.AAC2.0.x264-HARiKEN +: title: Show Name + season: 1 + episode: 8 + screen_size: 576p + streaming_service: TV3 Ireland + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: HARiKEN + type: episode + +# Streaming service: TV4 Sweeden +? 
Show.Name.S05.720p.TV4.WEBRip.AAC2.0.H.264-BTW +: title: Show Name + season: 5 + screen_size: 720p + streaming_service: TV4 Sweeden + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTW + type: episode + +# Streaming service: TLC +? Show.Name.S02.720p.TLC.WEBRip.AAC2.0.x264-BTW +: title: Show Name + season: 2 + screen_size: 720p + streaming_service: TLC + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTW + type: episode + +# Streaming service: Investigation Discovery +? Show.Name.S01E01.720p.ID.WEBRip.AAC2.0.x264-BTW +: title: Show Name + season: 1 + episode: 1 + screen_size: 720p + streaming_service: Investigation Discovery + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: BTW + type: episode + +# Streaming service: RTÉ One +? Show.Name.S10E01.576p.RTE.WEBRip.AAC2.0.H.264-RTN +: title: Show Name + season: 10 + episode: 1 + screen_size: 576p + streaming_service: RTÉ One + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: RTN + type: episode + +# Streaming service: AMC +? Show.Name.S01E01.1080p.AMC.WEBRip.H.264.AAC2.0-CasStudio +: title: Show Name + season: 1 + episode: 1 + screen_size: 1080p + streaming_service: AMC + source: Web + other: Rip + audio_codec: AAC + audio_channels: '2.0' + video_codec: H.264 + release_group: CasStudio + type: episode + +? Suits.S07E01.1080p.iT.WEB-DL.DD5.1.H.264-VLAD.mkv +? Suits.S07E01.1080p.iTunes.WEB-DL.DD5.1.H.264-VLAD.mkv +: title: Suits + season: 7 + episode: 1 + screen_size: 1080p + source: Web + streaming_service: iTunes + audio_codec: Dolby Digital + audio_channels: '5.1' + video_codec: H.264 + release_group: VLAD + container: mkv + type: episode + +? 
UpFront.S01.720p.AJAZ.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 720p + season: 1 + source: Web + streaming_service: Al Jazeera English + title: UpFront + type: episode + video_codec: H.264 + +? Smack.The.Pony.S01.4OD.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + season: 1 + source: Web + streaming_service: Channel 4 + title: Smack The Pony + type: episode + video_codec: H.264 + +? The.Toy.Box.S01E01.720p.AMBC.WEBRip.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: BTN + screen_size: 720p + season: 1 + source: Web + streaming_service: ABC + title: The Toy Box + type: episode + video_codec: H.264 + +? Gundam.Reconguista.in.G.S01.720p.ANLB.WEBRip.AAC2.0.x264-HorribleSubs +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: HorribleSubs + screen_size: 720p + season: 1 + source: Web + streaming_service: AnimeLab + title: Gundam Reconguista in G + type: episode + video_codec: H.264 + +? Animal.Nation.with.Anthony.Anderson.S01E01.1080p.ANPL.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: RTN + screen_size: 1080p + season: 1 + source: Web + streaming_service: Animal Planet + title: Animal Nation with Anthony Anderson + type: episode + video_codec: H.264 + +? Park.Bench.S01.1080p.AOL.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 1080p + season: 1 + source: Web + streaming_service: AOL + title: Park Bench + type: episode + video_codec: H.264 + +? Crime.Scene.Cleaner.S05.720p.ARD.WEBRip.AAC2.0.H.264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + screen_size: 720p + season: 5 + source: Web + streaming_service: ARD + title: Crime Scene Cleaner + type: episode + video_codec: H.264 + +? 
Decker.S03.720p.AS.WEB-DL.AAC2.0.H.264-RTN +: audio_channels: '2.0' + audio_codec: AAC + release_group: RTN + screen_size: 720p + season: 3 + source: Web + streaming_service: Adult Swim + title: Decker + type: episode + video_codec: H.264 + +? Southern.Charm.Savannah.S01E04.Hurricane.On.The.Horizon.1080p.BRAV.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + episode: 4 + episode_title: Hurricane On The Horizon + other: Rip + release_group: BTW + screen_size: 1080p + season: 1 + source: Web + streaming_service: BravoTV + title: Southern Charm Savannah + type: episode + video_codec: H.264 + +? Four.in.the.Morning.S01E01.Pig.RERip.720p.CBC.WEBRip.AAC2.0.H.264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + episode_title: Pig + other: + - Proper + - Rip + proper_count: 1 + release_group: RTN + screen_size: 720p + season: 1 + source: Web + streaming_service: CBC + title: Four in the Morning + type: episode + video_codec: H.264 + +? Rio.Olympics.2016.08.07.Mens.Football.Group.C.Germany.vs.South.Korea.720p.CBC.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + date: 2016-08-07 + episode_title: Mens Football Group C Germany vs South Korea + other: Rip + release_group: BTW + screen_size: 720p + source: Web + streaming_service: CBC + title: Rio Olympics + type: episode + video_codec: H.264 + +? Comedians.In.Cars.Getting.Coffee.S01.720p.CCGC.WEBRip.AAC2.0.x264-monkee +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: monkee + screen_size: 720p + season: 1 + source: Web + streaming_service: Comedians in Cars Getting Coffee + title: Comedians In Cars Getting Coffee + type: episode + video_codec: H.264 + +? Life.on.Top.S02.720p.CMAX.WEBRip.AAC2.0.x264-CMAX +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: CMAX + screen_size: 720p + season: 2 + source: Web + streaming_service: Cinemax + title: Life on Top + type: episode + video_codec: H.264 + +? 
Sun.Records.S01.720p.CMT.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 720p + season: 1 + source: Web + streaming_service: Country Music Television + title: Sun Records + type: episode + video_codec: H.264 + +? Infinity.Train.S01E00.Pilot.REPACK.720p.CN.WEBRip.AAC2.0.H.264-monkee +: audio_channels: '2.0' + audio_codec: AAC + episode: 0 + episode_details: Pilot + episode_title: Pilot + language: zh + other: + - Proper + - Rip + proper_count: 1 + release_group: monkee + screen_size: 720p + season: 1 + source: Web + streaming_service: Cartoon Network + title: Infinity Train + type: episode + video_codec: H.264 + +? Jay.Lenos.Garage.2015.S03E02.1080p.CNBC.WEB-DL.x264-TOPKEK +: episode: 2 + release_group: TOPKEK + screen_size: 1080p + season: 3 + source: Web + streaming_service: CNBC + title: Jay Lenos Garage + type: episode + video_codec: H.264 + year: 2015 + +? US.Presidential.Debates.2015.10.28.Third.Republican.Debate.720p.CNBC.WEBRip.AAC2.0.H.264-monkee +: audio_channels: '2.0' + audio_codec: AAC + country: US + date: 2015-10-28 + episode_title: Third Republican Debate + other: Rip + release_group: monkee + screen_size: 720p + source: Web + streaming_service: CNBC + title: Presidential Debates + type: episode + video_codec: H.264 + +? What.The.Fuck.France.S01E01.Le.doublage.CNLP.WEBRip.AAC2.0.x264-TURTLE +: audio_channels: '2.0' + audio_codec: AAC + country: FR + episode: 1 + episode_title: Le doublage + other: Rip + release_group: TURTLE + season: 1 + source: Web + streaming_service: Canal+ + title: What The Fuck + type: episode + video_codec: H.264 + +? SuperMansion.S02.720p.CRKL.WEBRip.AAC2.0.x264-VLAD +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: VLAD + screen_size: 720p + season: 2 + source: Web + streaming_service: Crackle + title: SuperMansion + type: episode + video_codec: H.264 + +? 
Chosen.S02.1080p.CRKL.WEBRip.AAC2.0.x264-AJP69 +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: AJP69 + screen_size: 1080p + season: 2 + source: Web + streaming_service: Crackle + title: Chosen + type: episode + video_codec: H.264 + +? Chosen.S03.1080p.CRKL.WEBRip.AAC2.0.x264-AJP69 +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: AJP69 + screen_size: 1080p + season: 3 + source: Web + streaming_service: Crackle + title: Chosen + type: episode + video_codec: H.264 + +? Snatch.S01.1080p.CRKL.WEBRip.AAC2.0.x264-DEFLATE +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: DEFLATE + screen_size: 1080p + season: 1 + source: Web + streaming_service: Crackle + title: Snatch + type: episode + video_codec: H.264 + +? White.House.Correspondents.Dinner.2015.Complete.CSPN.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: + - Complete + - Rip + release_group: BTW + source: Web + streaming_service: CSpan + title: White House Correspondents Dinner + type: movie + video_codec: H.264 + year: 2015 + +? The.Amazing.Race.Canada.S03.720p.CTV.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + country: CA + other: Rip + release_group: BTW + screen_size: 720p + season: 3 + source: Web + streaming_service: CTV + title: The Amazing Race + type: episode + video_codec: H.264 + +? Miniverse.S01E01.Explore.the.Solar.System.2160p.CUR.WEB-DL.DDP2.0.x264-monkee +: audio_channels: '2.0' + audio_codec: Dolby Digital Plus + episode: 1 + episode_title: Explore the Solar System + release_group: monkee + screen_size: 2160p + season: 1 + source: Web + streaming_service: CuriosityStream + title: Miniverse + type: episode + video_codec: H.264 + +? Vixen.S02.720p.CWS.WEBRip.AAC2.0.x264-BMF +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BMF + screen_size: 720p + season: 2 + source: Web + streaming_service: CWSeed + title: Vixen + type: episode + video_codec: H.264 + +? 
Abidin.Dino.DDY.WEBRip.AAC2.0.H.264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + source: Web + streaming_service: Digiturk Diledigin Yerde + title: Abidin Dino + type: movie + video_codec: H.264 + +? Fast.N.Loud.S08.1080p.DISC.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: RTN + screen_size: 1080p + season: 8 + source: Web + streaming_service: Discovery + title: Fast N Loud + type: episode + video_codec: H.264 + +? Bake.Off.Italia.S04.1080p.DPLY.WEBRip.AAC2.0.x264-Threshold +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: Threshold + screen_size: 1080p + season: 4 + source: Web + streaming_service: DPlay + title: Bake Off Italia + type: episode + video_codec: H.264 + +? Long.Riders.S01.DSKI.WEBRip.AAC2.0.x264-HorribleSubs +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: HorribleSubs + season: 1 + source: Web + streaming_service: Daisuki + title: Long Riders + type: episode + video_codec: H.264 + +? Milo.Murphys.Law.S01.720p.DSNY.WEB-DL.AAC2.0.x264-TVSmash +: audio_channels: '2.0' + audio_codec: AAC + release_group: TVSmash + screen_size: 720p + season: 1 + source: Web + streaming_service: Disney + title: Milo Murphys Law + type: episode + video_codec: H.264 + +? 30.for.30.S03E15.Doc.and.Darryl.720p.ESPN.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + episode: 15 + episode_title: Doc and Darryl + other: Rip + release_group: BTW + screen_size: 720p + season: 3 + source: Web + streaming_service: ESPN + title: 30 for 30 + type: episode + video_codec: H.264 + +? Boundless.S03.720p.ESQ.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: RTN + screen_size: 720p + season: 3 + source: Web + streaming_service: Esquire + title: Boundless + type: episode + video_codec: H.264 + +? 
Periodismo.Para.Todos.S2016E01.720p.ETTV.WEBRip.AAC2.0.H.264-braggart74 +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: braggart74 + screen_size: 720p + season: 2016 + source: Web + streaming_service: El Trece + title: Periodismo Para Todos + type: episode + video_codec: H.264 + year: 2016 + +? Just.Jillian.S01E01.1080p.ETV.WEBRip.AAC2.0.x264-GoApe +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: GoApe + screen_size: 1080p + season: 1 + source: Web + streaming_service: E! + title: Just Jillian + type: episode + video_codec: H.264 + +? New.Money.S01.1080p.ETV.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 1080p + season: 1 + source: Web + streaming_service: E! + title: New Money + type: episode + video_codec: H.264 + +? Gaming.Show.In.My.Parents.Garage.S02E01.The.Power.Up1000.FAM.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + episode_title: The Power Up1000 + other: Rip + release_group: RTN + season: 2 + source: Web + streaming_service: Family + title: Gaming Show In My Parents Garage + type: episode + video_codec: H.264 + +? Little.People.2016.S01E03.Proud.to.Be.You.and.Me.720p.FJR.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 3 + episode_title: Proud to Be You and Me + other: Rip + release_group: RTN + screen_size: 720p + season: 1 + source: Web + streaming_service: Family Jr + title: Little People + type: episode + video_codec: H.264 + year: 2016 + +? The.Pioneer.Woman.S00E08.Summer.Summer.Summer.720p.FOOD.WEB-DL.AAC2.0.x264-AJP69 +: audio_channels: '2.0' + audio_codec: AAC + episode: 8 + episode_title: Summer Summer Summer + release_group: AJP69 + screen_size: 720p + season: 0 + source: Web + streaming_service: Food Network + title: The Pioneer Woman + type: episode + video_codec: H.264 + +? 
Prata.da.Casa.S01E01.720p.FOX.WEBRip.AAC2.0.H.264-BARRY +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: BARRY + screen_size: 720p + season: 1 + source: Web + streaming_service: Fox + title: Prata da Casa + type: episode + video_codec: H.264 + +? Grandfathered.S01.720p.FOX.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 720p + season: 1 + source: Web + streaming_service: Fox + title: Grandfathered + type: episode + video_codec: H.264 + +? Truth.and.Iliza.S01E01.FREE.WEBRip.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: BTN + season: 1 + source: Web + streaming_service: Freeform + title: Truth and Iliza + type: episode + video_codec: H.264 + +? Seven.Year.Switch.S01.720p.FYI.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 720p + season: 1 + source: Web + streaming_service: FYI Network + title: Seven Year Switch + type: episode + video_codec: H.264 + +? NHL.2015.10.09.Leafs.vs.Red.Wings.Condensed.Game.720p.Away.Feed.GC.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + date: 2015-10-09 + episode_title: Leafs vs Red Wings Condensed Game + other: Rip + release_group: BTW + screen_size: 720p + source: Web + streaming_service: NHL GameCenter + title: NHL + type: episode + video_codec: H.264 + +? NHL.2016.01.26.Maple.Leafs.vs.Panthers.720p.Home.Feed.GC.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + date: 2016-01-26 + episode_title: Maple Leafs vs Panthers + other: Rip + release_group: BTW + screen_size: 720p + source: Web + streaming_service: NHL GameCenter + title: NHL + type: episode + video_codec: H.264 + +? 
Big.Brother.Canada.S05.GLBL.WEBRip.AAC2.0.H.264-RTN +: audio_channels: '2.0' + audio_codec: AAC + country: CA + other: Rip + release_group: RTN + season: 5 + source: Web + streaming_service: Global + title: Big Brother + type: episode + video_codec: H.264 + +? Pornolandia.S01.720p.GLOB.WEBRip.AAC2.0.x264-GeneX +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: GeneX + screen_size: 720p + season: 1 + source: Web + streaming_service: GloboSat Play + title: Pornolandia + type: episode + video_codec: H.264 + +? Transando.com.Laerte.S01.720p.GLOB.WEBRip.AAC2.0.x264-GeneX +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: GeneX + screen_size: 720p + season: 1 + source: Web + streaming_service: GloboSat Play + title: Transando com Laerte + type: episode + video_codec: H.264 + +? Flip.or.Flop.S01.720p.HGTV.WEBRip.AAC2.0.H.264-AJP69 +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: AJP69 + screen_size: 720p + season: 1 + source: Web + streaming_service: HGTV + title: Flip or Flop + type: episode + video_codec: H.264 + +? Kitten.Bowl.2014.720p.HLMK.WEBRip.AAC2.0.x264-monkee +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: monkee + screen_size: 720p + source: Web + streaming_service: Hallmark + title: Kitten Bowl + type: movie + video_codec: H.264 + year: 2014 + +? Still.Star-Crossed.S01E05.720p.HULU.WEB-DL.AAC2.0.H.264-VLAD +: audio_channels: '2.0' + audio_codec: AAC + episode: 5 + release_group: VLAD + screen_size: 720p + season: 1 + source: Web + streaming_service: Hulu + title: Still Star-Crossed + type: episode + video_codec: H.264 + +? EastEnders.2017.07.17.720p.iP.WEB-DL.AAC2.0.H.264-BTN +: audio_channels: '2.0' + audio_codec: AAC + date: 2017-07-17 + release_group: BTN + screen_size: 720p + source: Web + streaming_service: BBC iPlayer + title: EastEnders + type: episode + video_codec: H.264 + +? 
Handmade.in.Japan.S01E01.720p.iP.WEBRip.AAC2.0.H.264-SUP +: audio_channels: '2.0' + audio_codec: AAC + country: JP + episode: 1 + other: Rip + release_group: SUP + screen_size: 720p + season: 1 + source: Web + streaming_service: BBC iPlayer + title: Handmade in + type: episode + video_codec: H.264 + +? The.Chillenden.Murders.S01.720p.iP.WEBRip.AAC2.0.H.264-HAX +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: HAX + screen_size: 720p + season: 1 + source: Web + streaming_service: BBC iPlayer + title: The Chillenden Murders + type: episode + video_codec: H.264 + +? The.Street.S01.ITV.WEB-DL.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + release_group: RTN + season: 1 + source: Web + streaming_service: ITV + title: The Street + type: episode + video_codec: H.264 + +? Hope.for.Wildlife.S04.1080p.KNOW.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 1080p + season: 4 + source: Web + streaming_service: Knowledge Network + title: Hope for Wildlife + type: episode + video_codec: H.264 + +? Kim.of.Queens.S02.720p.LIFE.WEBRip.AAC2.0.H.264-RTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: RTN + screen_size: 720p + season: 2 + source: Web + streaming_service: Lifetime + title: Kim of Queens + type: episode + video_codec: H.264 + +? The.Rachel.Maddow.Show.2017.02.22.720p.MNBC.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + date: 2017-02-22 + other: Rip + release_group: BTW + screen_size: 720p + source: Web + streaming_service: MSNBC + title: The Rachel Maddow Show + type: episode + video_codec: H.264 + +? Ignition.S06E12.720p.MTOD.WEB-DL.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 12 + release_group: RTN + screen_size: 720p + season: 6 + source: Web + streaming_service: Motor Trend OnDemand + title: Ignition + type: episode + video_codec: H.264 + +? 
Teen.Mom.UK.S01E01.Life.as.a.Teen.Mum.1080p.MTV.WEB-DL.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + country: GB + episode: 1 + episode_title: Life as a Teen Mum + release_group: BTW + screen_size: 1080p + season: 1 + source: Web + streaming_service: MTV + title: Teen Mom + type: episode + video_codec: H.264 + +? Undrafted.S01.720p.NFLN.WEBRip.AAC2.0.H.264-TTYL +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: TTYL + screen_size: 720p + season: 1 + source: Web + streaming_service: NFL Now + title: Undrafted + type: episode + video_codec: H.264 + +? NFL.2016.08.25.PreSeason.Cowboys.vs.Seahawks.720p.NFL.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + date: 2016-08-25 + episode_title: PreSeason Cowboys vs Seahawks + other: Rip + release_group: BTW + screen_size: 720p + source: Web + streaming_service: NFL + title: NFL + type: episode + video_codec: H.264 + +? Bunsen.is.a.Beast.S01E23.Guinea.Some.Lovin.1080p.NICK.WEBRip.AAC2.0.x264-TVSmash +: audio_channels: '2.0' + audio_codec: AAC + country: GN + episode: 23 + episode_title: Some Lovin + other: Rip + release_group: TVSmash + screen_size: 1080p + season: 1 + source: Web + streaming_service: Nickelodeon + title: Bunsen is a Beast + type: episode + video_codec: H.264 + +? Valkyrie.S01.720p.NRK.WEBRip.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + screen_size: 720p + season: 1 + source: Web + streaming_service: Norsk Rikskringkasting + title: Valkyrie + type: episode + video_codec: H.264 + +? Food.Forward.S01.720p.PBS.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: RTN + screen_size: 720p + season: 1 + source: Web + streaming_service: PBS + title: Food Forward + type: episode + video_codec: H.264 + +? 
SciGirls.S01E01.Turtle.Mania.720p.PBSK.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + episode_title: Turtle Mania + other: Rip + release_group: RTN + screen_size: 720p + season: 1 + source: Web + streaming_service: PBS Kids + title: SciGirls + type: episode + video_codec: H.264 + +? Powers.2015.S01.1080p.PSN.WEBRip.DD5.1.x264-NTb +: audio_channels: '5.1' + audio_codec: Dolby Digital + other: Rip + release_group: NTb + screen_size: 1080p + season: 1 + source: Web + streaming_service: Playstation Network + title: Powers + type: episode + video_codec: H.264 + year: 2015 + +? Escape.The.Night.S02E02.The.Masquerade.Part.II.1080p.RED.WEBRip.AAC5.1.VP9-BTW +: audio_channels: '5.1' + audio_codec: AAC + episode: 2 + episode_title: The Masquerade + other: Rip + part: 2 + release_group: VP9-BTW + screen_size: 1080p + season: 2 + source: Web + streaming_service: YouTube Red + title: Escape The Night + type: episode + +? Escape.The.Night.S02E02.The.Masquerade.Part.II.2160p.RED.WEBRip.AAC5.1.VP9-BTW +: audio_channels: '5.1' + audio_codec: AAC + episode: 2 + episode_title: The Masquerade + other: Rip + part: 2 + release_group: VP9-BTW + screen_size: 2160p + season: 2 + source: Web + streaming_service: YouTube Red + title: Escape The Night + type: episode + +? Escape.The.Night.S02E02.The.Masquerade.Part.II.720p.RED.WEBRip.AAC5.1.VP9-BTW +: audio_channels: '5.1' + audio_codec: AAC + episode: 2 + episode_title: The Masquerade + other: Rip + part: 2 + release_group: VP9-BTW + screen_size: 720p + season: 2 + source: Web + streaming_service: YouTube Red + title: Escape The Night + type: episode + +? The.Family.Law.S02E01.720p.SBS.WEB-DL.AAC2.0.H.264-BTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + release_group: BTN + screen_size: 720p + season: 2 + source: Web + streaming_service: SBS (AU) + title: The Family Law + type: episode + video_codec: H.264 + +? 
Theres.No.Joy.In.Beachville.The.True.Story.of.Baseballs.Origin.720p.SNET.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 720p + source: Web + streaming_service: Sportsnet + title: Theres No Joy In Beachville The True Story of Baseballs Origin + type: movie + video_codec: H.264 + +? One.Night.Only.Alec.Baldwin.720p.SPIK.WEB-DL.AAC2.0.x264-NOGRP +: audio_channels: '2.0' + audio_codec: AAC + release_group: NOGRP + screen_size: 720p + source: Web + streaming_service: Spike + title: One Night Only Alec Baldwin + type: movie + video_codec: H.264 + +? Ink.Master.S08.720p.SPIK.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 720p + season: 8 + source: Web + streaming_service: Spike + title: Ink Master + type: episode + video_codec: H.264 + +? Jungle.Bunch.S01E01.Deep.Chasm.1080p.SPRT.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + episode_title: Deep Chasm + other: Rip + release_group: RTN + screen_size: 1080p + season: 1 + source: Web + streaming_service: Sprout + title: Jungle Bunch + type: episode + video_codec: H.264 + +? Ash.vs.Evil.Dead.S01.720p.STZ.WEBRip.AAC2.0.x264-NTb +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: NTb + screen_size: 720p + season: 1 + source: Web + streaming_service: Starz + title: Ash vs Evil Dead + type: episode + video_codec: H.264 + +? WWE.Swerved.S01.720p.WWEN.WEBRip.AAC2.0.H.264-PPKORE +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: PPKORE + screen_size: 720p + season: 1 + source: Web + streaming_service: WWE Network + title: WWE Swerved + type: episode + video_codec: H.264 + +? 
Face.Off.S11.1080p.SYFY.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + screen_size: 1080p + season: 11 + source: Web + streaming_service: Syfy + title: Face Off + type: episode + video_codec: H.264 + +? Conan.2016.09.22.Jeff.Garlin.720p.TBS.WEBRip.AAC2.0.H.264-NOGRP +: audio_channels: '2.0' + audio_codec: AAC + date: 2016-09-22 + episode_title: Jeff Garlin + other: Rip + release_group: NOGRP + screen_size: 720p + source: Web + streaming_service: TBS + title: Conan + type: episode + video_codec: H.264 + +? Swans.Crossing.S01.TUBI.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: RTN + season: 1 + source: Web + streaming_service: TubiTV + title: Swans Crossing + type: episode + video_codec: H.264 + +? The.Joy.of.Techs.S01.UKTV.WEB-DL.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + release_group: RTN + season: 1 + source: Web + streaming_service: UKTV + title: The Joy of Techs + type: episode + video_codec: H.264 + +? Rock.Icons.S01.720p.VH1.WEB-DL.AAC2.0.H.264-RTN +: audio_channels: '2.0' + audio_codec: AAC + release_group: RTN + screen_size: 720p + season: 1 + source: Web + streaming_service: VH1 + title: Rock Icons + type: episode + video_codec: H.264 + +? Desus.and.Mero.S01E130.2017.07.18.1080p.VICE.WEB-DL.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + date: 2017-07-18 + episode: 130 + release_group: RTN + screen_size: 1080p + season: 1 + source: Web + streaming_service: Viceland + title: Desus and Mero + type: episode + video_codec: H.264 + +? Graveyard.Carz.S07.1080p.VLCT.WEBRip.AAC2.0.x264-RTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: RTN + screen_size: 1080p + season: 7 + source: Web + streaming_service: Velocity + title: Graveyard Carz + type: episode + video_codec: H.264 + +? 
Other.Space.S01E01.1080p.YHOO.WEBRip.AAC2.0.x264-BTW +: audio_channels: '2.0' + audio_codec: AAC + episode: 1 + other: Rip + release_group: BTW + screen_size: 1080p + season: 1 + source: Web + streaming_service: Yahoo + title: Other Space + type: episode + video_codec: H.264 + +? Americas.Test.Kitchen.S17.720p.ATK.WEB-DL.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + release_group: BTN + screen_size: 720p + season: 17 + source: Web + streaming_service: America's Test Kitchen + title: Americas Test Kitchen + type: episode + video_codec: H.264 + +? Bushwhacked.Bugs.S01.AUBC.WEBRip.AAC2.0.H.264-DAWN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: DAWN + season: 1 + source: Web + streaming_service: ABC Australia + title: Bushwhacked Bugs + type: episode + video_codec: H.264 + +? VICE.S05E12.1080p.HBO.WEB-DL.AAC2.0.H.264-monkee +? VICE.S05E12.1080p.HBO-Go.WEB-DL.AAC2.0.H.264-monkee +? VICE.S05E12.1080p.HBOGo.WEB-DL.AAC2.0.H.264-monkee +: audio_channels: '2.0' + audio_codec: AAC + episode: 12 + release_group: monkee + screen_size: 1080p + season: 5 + source: Web + streaming_service: HBO Go + title: VICE + type: episode + video_codec: H.264 + +? Dix.Pour.Cent.S02.PLUZ.WEBRip.AAC2.0.H.264-TURTLE +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: TURTLE + season: 2 + source: Web + streaming_service: Pluzz + title: Dix Pour Cent + type: episode + video_codec: H.264 + +? Ulveson.och.Herngren.S01.720p.SVT.WEBRip.AAC2.0.H.264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + screen_size: 720p + season: 1 + source: Web + streaming_service: Sveriges Television + title: Ulveson och Herngren + type: episode + video_codec: H.264 + +? 
Bravest.Warriors.S03.1080p.VRV.WEBRip.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + screen_size: 1080p + season: 3 + source: Web + streaming_service: VRV + title: Bravest Warriors + type: episode + video_codec: H.264 + +? The.Late.Night.Big.Breakfast.S02.WME.WEBRip.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + season: 2 + source: Web + streaming_service: WatchMe + title: The Late Night Big Breakfast + type: episode + video_codec: H.264 + +? Hockey.Wives.S02.WNET.WEBRip.AAC2.0.H.264-BTW +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTW + season: 2 + source: Web + streaming_service: W Network + title: Hockey Wives + type: episode + video_codec: H.264 + +? Sin.City.Saints.S01.1080p.YHOO.WEBRip.AAC2.0.x264-NTb +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: NTb + screen_size: 1080p + season: 1 + source: Web + streaming_service: Yahoo + title: Sin City Saints + type: episode + video_codec: H.264 + +? 555.S01.1080p.VMEO.WEBRip.AAC2.0.x264-BTN +: audio_channels: '2.0' + audio_codec: AAC + other: Rip + release_group: BTN + screen_size: 1080p + season: 1 + source: Web + streaming_service: Vimeo + title: '555' + type: episode + video_codec: H.264 + +# All this below shouldn't match any streaming services +? London.2012.Olympics.CTV.Preview.Show.HDTV.x264-2HD +: alternative_title: Olympics CTV Preview Show + release_group: 2HD + source: HDTV + title: London + type: movie + video_codec: H.264 + year: 2012 + +? UFC.on.FOX.24.1080p.HDTV.x264-VERUM +: episode: 24 + release_group: VERUM + screen_size: 1080p + source: HDTV + title: UFC on FOX + type: episode + video_codec: H.264 + +? ESPN.E.60.2016.10.04.HDTV.x264-LoTV +: date: 2016-10-04 + episode: 60 + release_group: LoTV + source: HDTV + title: ESPN E + type: episode + video_codec: H.264 + +? 
GTTV.E3.All.Access.Live.Day.1.Xbox.Showcase.Preshow.HDTV.x264-SYS +: episode: 3 + episode_title: All Access Live Day 1 Xbox Showcase Preshow + release_group: SYS + source: HDTV + title: GTTV + type: episode + video_codec: H.264 diff --git a/lib/guessit/test/suggested.json b/lib/guessit/test/suggested.json new file mode 100644 index 00000000..dc838ad0 --- /dev/null +++ b/lib/guessit/test/suggested.json @@ -0,0 +1,21 @@ +{ + "titles": [ + "13 Reasons Why", + "Star Wars: Episode VII - The Force Awakens", + "3%", + "The 100", + "3 Percent", + "This is Us", + "Open Season 2", + "Game of Thrones", + "The X-Files", + "11.22.63" + ], + "suggested": [ + "13 Reasons Why", + "Star Wars: Episode VII - The Force Awakens", + "The 100", + "Open Season 2", + "11.22.63" + ] +} \ No newline at end of file diff --git a/lib/guessit/test/test-input-file.txt b/lib/guessit/test/test-input-file.txt new file mode 100644 index 00000000..656bc931 --- /dev/null +++ b/lib/guessit/test/test-input-file.txt @@ -0,0 +1,2 @@ +Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv +SecondFile.avi \ No newline at end of file diff --git a/lib/guessit/test/test_api.py b/lib/guessit/test/test_api.py new file mode 100644 index 00000000..391dbced --- /dev/null +++ b/lib/guessit/test/test_api.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement +import json +import os +import sys + +import pytest +import six + +from ..api import guessit, properties, suggested_expected, GuessitException + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + +def test_default(): + ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret + + +def test_forced_unicode(): + ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret 
and 'title' in ret and isinstance(ret['title'], six.text_type) + + +def test_forced_binary(): + ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret and isinstance(ret['title'], six.binary_type) + + +@pytest.mark.skipif(sys.version_info < (3, 4), reason="Path is not available") +def test_pathlike_object(): + try: + from pathlib import Path + + path = Path('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + ret = guessit(path) + assert ret and 'title' in ret + except ImportError: # pragma: no-cover + pass + + +def test_unicode_japanese(): + ret = guessit('[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi') + assert ret and 'title' in ret + + +def test_unicode_japanese_options(): + ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]}) + assert ret and 'title' in ret and ret['title'] == "阿维达" + + +def test_forced_unicode_japanese_options(): + ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == u"阿维达" + +# TODO: This doesn't compile on python 3, but should be tested on python 2. 
+""" +if six.PY2: + def test_forced_binary_japanese_options(): + ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == b"阿维达" +""" + + +def test_properties(): + props = properties() + assert 'video_codec' in props.keys() + + +def test_exception(): + with pytest.raises(GuessitException) as excinfo: + guessit(object()) + assert "An internal error has occured in guessit" in str(excinfo.value) + assert "Guessit Exception Report" in str(excinfo.value) + assert "Please report at https://github.com/guessit-io/guessit/issues" in str(excinfo.value) + + +def test_suggested_expected(): + with open(os.path.join(__location__, 'suggested.json'), 'r') as f: + content = json.load(f) + actual = suggested_expected(content['titles']) + assert actual == content['suggested'] diff --git a/lib/guessit/test/test_api_unicode_literals.py b/lib/guessit/test/test_api_unicode_literals.py new file mode 100644 index 00000000..826f7cd1 --- /dev/null +++ b/lib/guessit/test/test_api_unicode_literals.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement + + +from __future__ import unicode_literals + +import os + +import pytest +import six + +from ..api import guessit, properties, GuessitException + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + +def test_default(): + ret = guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret + + +def test_forced_unicode(): + ret = guessit(u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 'title' in ret and isinstance(ret['title'], six.text_type) + + +def test_forced_binary(): + ret = guessit(b'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + assert ret and 
'title' in ret and isinstance(ret['title'], six.binary_type) + + +def test_unicode_japanese(): + ret = guessit('[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi') + assert ret and 'title' in ret + + +def test_unicode_japanese_options(): + ret = guessit("[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": ["阿维达"]}) + assert ret and 'title' in ret and ret['title'] == "阿维达" + + +def test_forced_unicode_japanese_options(): + ret = guessit(u"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [u"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == u"阿维达" + +# TODO: This doesn't compile on python 3, but should be tested on python 2. +""" +if six.PY2: + def test_forced_binary_japanese_options(): + ret = guessit(b"[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi", options={"expected_title": [b"阿维达"]}) + assert ret and 'title' in ret and ret['title'] == b"阿维达" +""" + + +def test_ensure_standard_string_class(): + class CustomStr(str): + pass + + ret = guessit(CustomStr('1080p'), options={'advanced': True}) + assert ret and 'screen_size' in ret and not isinstance(ret['screen_size'].input_string, CustomStr) + + +def test_properties(): + props = properties() + assert 'video_codec' in props.keys() + + +def test_exception(): + with pytest.raises(GuessitException) as excinfo: + guessit(object()) + assert "An internal error has occured in guessit" in str(excinfo.value) + assert "Guessit Exception Report" in str(excinfo.value) + assert "Please report at https://github.com/guessit-io/guessit/issues" in str(excinfo.value) diff --git a/lib/guessit/test/test_benchmark.py b/lib/guessit/test/test_benchmark.py new file mode 100644 index 00000000..34386e30 --- /dev/null +++ b/lib/guessit/test/test_benchmark.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use,pointless-statement,missing-docstring,invalid-name,line-too-long +import time + +import pytest + +from ..api import guessit + + +def case1(): + 
return guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv') + + +def case2(): + return guessit('Movies/Fantastic Mr Fox/Fantastic.Mr.Fox.2009.DVDRip.{x264+LC-AAC.5.1}{Fr-Eng}{Sub.Fr-Eng}-™.[sharethefiles.com].mkv') + + +def case3(): + return guessit('Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi') + + +def case4(): + return guessit('Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv') + + +@pytest.mark.benchmark( + group="Performance Tests", + min_time=1, + max_time=2, + min_rounds=5, + timer=time.time, + disable_gc=True, + warmup=False +) +@pytest.mark.skipif(True, reason="Disabled") +class TestBenchmark(object): + def test_case1(self, benchmark): + ret = benchmark(case1) + assert ret + + def test_case2(self, benchmark): + ret = benchmark(case2) + assert ret + + def test_case3(self, benchmark): + ret = benchmark(case3) + assert ret + + def test_case4(self, benchmark): + ret = benchmark(case4) + assert ret diff --git a/lib/guessit/test/test_main.py b/lib/guessit/test/test_main.py new file mode 100644 index 00000000..cbdba7aa --- /dev/null +++ b/lib/guessit/test/test_main.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name + +import os + +import pytest + +from ..__main__ import main + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + +def test_main_no_args(): + main([]) + + +def test_main(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv']) + + +def test_main_unicode(): + main(['[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi']) + + +def test_main_forced_unicode(): + main([u'Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv']) + + +def test_main_verbose(): + 
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--verbose']) + + +def test_main_yaml(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--yaml']) + + +def test_main_json(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--json']) + + +def test_main_show_property(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '-P', 'title']) + + +def test_main_advanced(): + main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '-a']) + + +def test_main_input(): + main(['--input', os.path.join(__location__, 'test-input-file.txt')]) + + +def test_main_properties(): + main(['-p']) + main(['-p', '--json']) + main(['-p', '--yaml']) + + +def test_main_values(): + main(['-V']) + main(['-V', '--json']) + main(['-V', '--yaml']) + + +def test_main_help(): + with pytest.raises(SystemExit): + main(['--help']) + + +def test_main_version(): + main(['--version']) diff --git a/lib/guessit/test/test_options.py b/lib/guessit/test/test_options.py new file mode 100644 index 00000000..4f019b34 --- /dev/null +++ b/lib/guessit/test/test_options.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement +import os + +import pytest + +from ..options import get_options_file_locations, merge_options, load_config_file, ConfigurationException, \ + load_config + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + +def test_config_locations(): + homedir = '/root' + cwd = '/root/cwd' + + locations = get_options_file_locations(homedir, cwd, True) + assert len(locations) == 9 + + assert '/root/.guessit/options.json' in locations + assert '/root/.guessit/options.yml' in locations + assert '/root/.guessit/options.yaml' in locations + assert 
'/root/.config/guessit/options.json' in locations + assert '/root/.config/guessit/options.yml' in locations + assert '/root/.config/guessit/options.yaml' in locations + assert '/root/cwd/guessit.options.json' in locations + assert '/root/cwd/guessit.options.yml' in locations + assert '/root/cwd/guessit.options.yaml' in locations + + +def test_merge_configurations(): + c1 = {'param1': True, 'param2': True, 'param3': False} + c2 = {'param1': False, 'param2': True, 'param3': False} + c3 = {'param1': False, 'param2': True, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert not merged['param1'] + assert merged['param2'] + assert not merged['param3'] + + merged = merge_options(c3, c2, c1) + assert merged['param1'] + assert merged['param2'] + assert not merged['param3'] + + +def test_merge_configurations_lists(): + c1 = {'param1': [1], 'param2': True, 'param3': False} + c2 = {'param1': [2], 'param2': True, 'param3': False} + c3 = {'param1': [3], 'param2': True, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert merged['param1'] == [1, 2, 3] + assert merged['param2'] + assert not merged['param3'] + + merged = merge_options(c3, c2, c1) + assert merged['param1'] == [3, 2, 1] + assert merged['param2'] + assert not merged['param3'] + + +def test_merge_configurations_deep(): + c1 = {'param1': [1], 'param2': {'d1': [1]}, 'param3': False} + c2 = {'param1': [2], 'param2': {'d1': [2]}, 'param3': False} + c3 = {'param1': [3], 'param2': {'d3': [3]}, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert merged['param1'] == [1, 2, 3] + assert merged['param2']['d1'] == [1, 2] + assert merged['param2']['d3'] == [3] + assert 'd2' not in merged['param2'] + assert not merged['param3'] + + merged = merge_options(c3, c2, c1) + assert merged['param1'] == [3, 2, 1] + assert merged['param2'] + assert merged['param2']['d1'] == [2, 1] + assert 'd2' not in merged['param2'] + assert merged['param2']['d3'] == [3] + assert not merged['param3'] + + +def 
test_merge_configurations_pristine_all(): + c1 = {'param1': [1], 'param2': True, 'param3': False} + c2 = {'param1': [2], 'param2': True, 'param3': False, 'pristine': True} + c3 = {'param1': [3], 'param2': True, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert merged['param1'] == [2, 3] + assert merged['param2'] + assert not merged['param3'] + + merged = merge_options(c3, c2, c1) + assert merged['param1'] == [2, 1] + assert merged['param2'] + assert not merged['param3'] + + +def test_merge_configurations_pristine_properties(): + c1 = {'param1': [1], 'param2': False, 'param3': True} + c2 = {'param1': [2], 'param2': True, 'param3': False, 'pristine': ['param2', 'param3']} + c3 = {'param1': [3], 'param2': True, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert merged['param1'] == [1, 2, 3] + assert merged['param2'] + assert not merged['param3'] + + +def test_merge_configurations_pristine_properties_deep(): + c1 = {'param1': [1], 'param2': {'d1': False}, 'param3': True} + c2 = {'param1': [2], 'param2': {'d1': True}, 'param3': False, 'pristine': ['param2', 'param3']} + c3 = {'param1': [3], 'param2': {'d1': True}, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert merged['param1'] == [1, 2, 3] + assert merged['param2'] + assert not merged['param3'] + + +def test_merge_configurations_pristine_properties2(): + c1 = {'param1': [1], 'param2': False, 'param3': True} + c2 = {'param1': [2], 'param2': True, 'param3': False, 'pristine': ['param1', 'param2', 'param3']} + c3 = {'param1': [3], 'param2': True, 'param3': False} + + merged = merge_options(c1, c2, c3) + assert merged['param1'] == [2, 3] + assert merged['param2'] + assert not merged['param3'] + + +def test_load_config_file(): + json_config = load_config_file(os.path.join(__location__, 'config', 'test.json')) + yml_config = load_config_file(os.path.join(__location__, 'config', 'test.yml')) + yaml_config = load_config_file(os.path.join(__location__, 'config', 'test.yaml')) + + 
assert json_config['expected_title'] == ['The 100', 'OSS 117'] + assert yml_config['expected_title'] == ['The 100', 'OSS 117'] + assert yaml_config['expected_title'] == ['The 100', 'OSS 117'] + + assert json_config['yaml'] is False + assert yml_config['yaml'] is True + assert yaml_config['yaml'] is True + + with pytest.raises(ConfigurationException) as excinfo: + load_config_file(os.path.join(__location__, 'config', 'dummy.txt')) + + assert excinfo.match('Configuration file extension is not supported for ".*?dummy.txt" file\\.') + + +def test_load_config(): + config = load_config({'no_default_config': True, 'param1': 'test', + 'config': [os.path.join(__location__, 'config', 'test.yml')]}) + + assert not config.get('param1') + + assert config.get('advanced_config') # advanced_config is still loaded from default + assert config['expected_title'] == ['The 100', 'OSS 117'] + assert config['yaml'] is True + + config = load_config({'no_default_config': True, 'param1': 'test'}) + + assert not config.get('param1') + + assert 'expected_title' not in config + assert 'yaml' not in config + + config = load_config({'no_default_config': True, 'param1': 'test', 'config': ['false']}) + + assert not config.get('param1') + + assert 'expected_title' not in config + assert 'yaml' not in config diff --git a/lib/guessit/test/test_yml.py b/lib/guessit/test/test_yml.py new file mode 100644 index 00000000..040796de --- /dev/null +++ b/lib/guessit/test/test_yml.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name +import logging +import os +# io.open supports encoding= in python 2.7 +from io import open # pylint: disable=redefined-builtin + +import babelfish +import six # pylint:disable=wrong-import-order +import yaml # pylint:disable=wrong-import-order +from rebulk.remodule import re +from rebulk.utils import is_iterable + +from .. 
import guessit +from ..options import parse_options +from ..yamlutils import OrderedDictYAMLLoader + +logger = logging.getLogger(__name__) + +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + +class EntryResult(object): + def __init__(self, string, negates=False): + self.string = string + self.negates = negates + self.valid = [] + self.missing = [] + self.different = [] + self.extra = [] + self.others = [] + + @property + def ok(self): + if self.negates: + return self.missing or self.different + return not self.missing and not self.different and not self.extra and not self.others + + @property + def warning(self): + if self.negates: + return False + return not self.missing and not self.different and self.extra + + @property + def error(self): + if self.negates: + return not self.missing and not self.different and not self.others + return self.missing or self.different or self.others + + def __repr__(self): + if self.ok: + return self.string + ': OK!' + if self.warning: + return '%s%s: WARNING! (valid=%i, extra=%i)' % ('-' if self.negates else '', self.string, len(self.valid), + len(self.extra)) + if self.error: + return '%s%s: ERROR! (valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \ + ('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different), + len(self.extra), len(self.others)) + + return '%s%s: UNKOWN! 
(valid=%i, missing=%i, different=%i, extra=%i, others=%i)' % \ + ('-' if self.negates else '', self.string, len(self.valid), len(self.missing), len(self.different), + len(self.extra), len(self.others)) + + @property + def details(self): + ret = [] + if self.valid: + ret.append('valid=' + str(len(self.valid))) + for valid in self.valid: + ret.append(' ' * 4 + str(valid)) + if self.missing: + ret.append('missing=' + str(len(self.missing))) + for missing in self.missing: + ret.append(' ' * 4 + str(missing)) + if self.different: + ret.append('different=' + str(len(self.different))) + for different in self.different: + ret.append(' ' * 4 + str(different)) + if self.extra: + ret.append('extra=' + str(len(self.extra))) + for extra in self.extra: + ret.append(' ' * 4 + str(extra)) + if self.others: + ret.append('others=' + str(len(self.others))) + for other in self.others: + ret.append(' ' * 4 + str(other)) + return ret + + +class Results(list): + def assert_ok(self): + errors = [entry for entry in self if entry.error] + assert not errors + + +def files_and_ids(predicate=None): + files = [] + ids = [] + + for (dirpath, _, filenames) in os.walk(__location__): + if os.path.split(dirpath)[-1] == 'config': + continue + if dirpath == __location__: + dirpath_rel = '' + else: + dirpath_rel = os.path.relpath(dirpath, __location__) + for filename in filenames: + name, ext = os.path.splitext(filename) + filepath = os.path.join(dirpath_rel, filename) + if ext == '.yml' and (not predicate or predicate(filepath)): + files.append(filepath) + ids.append(os.path.join(dirpath_rel, name)) + + return files, ids + + +class TestYml(object): + """ + Run tests from yaml files. + Multiple input strings having same expected results can be chained. + Use $ marker to check inputs that should not match results. 
+ """ + + options_re = re.compile(r'^([ +-]+)(.*)') + + def _get_unique_id(self, collection, base_id): + ret = base_id + i = 2 + while ret in collection: + suffix = "-" + str(i) + ret = base_id + suffix + i += 1 + return ret + + def pytest_generate_tests(self, metafunc): + if 'yml_test_case' in metafunc.fixturenames: + entries = [] + entry_ids = [] + entry_set = set() + + for filename, _ in zip(*files_and_ids()): + with open(os.path.join(__location__, filename), 'r', encoding='utf-8') as infile: + data = yaml.load(infile, OrderedDictYAMLLoader) + + last_expected = None + for string, expected in reversed(list(data.items())): + if expected is None: + data[string] = last_expected + else: + last_expected = expected + + default = None + try: + default = data['__default__'] + del data['__default__'] + except KeyError: + pass + + for string, expected in data.items(): + TestYml.set_default(expected, default) + string = TestYml.fix_encoding(string, expected) + + entries.append((filename, string, expected)) + unique_id = self._get_unique_id(entry_set, '[' + filename + '] ' + str(string)) + entry_set.add(unique_id) + entry_ids.append(unique_id) + + metafunc.parametrize('yml_test_case', entries, ids=entry_ids) + + @staticmethod + def set_default(expected, default): + if default: + for k, v in default.items(): + if k not in expected: + expected[k] = v + + @classmethod + def fix_encoding(cls, string, expected): + if six.PY2: + if isinstance(string, six.text_type): + string = string.encode('utf-8') + converts = [] + for k, v in expected.items(): + if isinstance(v, six.text_type): + v = v.encode('utf-8') + converts.append((k, v)) + for k, v in converts: + expected[k] = v + if not isinstance(string, str): + string = str(string) + return string + + def test_entry(self, yml_test_case): + filename, string, expected = yml_test_case + result = self.check_data(filename, string, expected) + assert not result.error + + def check_data(self, filename, string, expected): + entry = 
self.check(string, expected) + if entry.ok: + logger.debug('[%s] %s', filename, entry) + elif entry.warning: + logger.warning('[%s] %s', filename, entry) + elif entry.error: + logger.error('[%s] %s', filename, entry) + for line in entry.details: + logger.error('[%s] %s', filename, ' ' * 4 + line) + return entry + + def check(self, string, expected): + negates, global_, string = self.parse_token_options(string) + + options = expected.get('options') + if options is None: + options = {} + if not isinstance(options, dict): + options = parse_options(options) + try: + result = guessit(string, options) + except Exception as exc: + logger.error('[%s] Exception: %s', string, exc) + raise exc + + entry = EntryResult(string, negates) + + if global_: + self.check_global(string, result, entry) + + self.check_expected(result, expected, entry) + + return entry + + def parse_token_options(self, string): + matches = self.options_re.search(string) + negates = False + global_ = False + if matches: + string = matches.group(2) + for opt in matches.group(1): + if '-' in opt: + negates = True + if '+' in opt: + global_ = True + return negates, global_, string + + def check_global(self, string, result, entry): + global_span = [] + for result_matches in result.matches.values(): + for result_match in result_matches: + if not global_span: + global_span = list(result_match.span) + else: + if global_span[0] > result_match.span[0]: + global_span[0] = result_match.span[0] + if global_span[1] < result_match.span[1]: + global_span[1] = result_match.span[1] + if global_span and global_span[1] - global_span[0] < len(string): + entry.others.append("Match is not global") + + def is_same(self, value, expected): + values = set(value) if is_iterable(value) else set((value,)) + expecteds = set(expected) if is_iterable(expected) else set((expected,)) + if len(values) != len(expecteds): + return False + if isinstance(next(iter(values)), babelfish.Language): + # pylint: disable=no-member + expecteds = 
{babelfish.Language.fromguessit(expected) for expected in expecteds} + elif isinstance(next(iter(values)), babelfish.Country): + # pylint: disable=no-member + expecteds = {babelfish.Country.fromguessit(expected) for expected in expecteds} + return values == expecteds + + def check_expected(self, result, expected, entry): + if expected: + for expected_key, expected_value in expected.items(): + if expected_key and expected_key != 'options' and expected_value is not None: + negates_key, _, result_key = self.parse_token_options(expected_key) + if result_key in result.keys(): + if not self.is_same(result[result_key], expected_value): + if negates_key: + entry.valid.append((expected_key, expected_value)) + else: + entry.different.append((expected_key, expected_value, result[result_key])) + else: + if negates_key: + entry.different.append((expected_key, expected_value, result[result_key])) + else: + entry.valid.append((expected_key, expected_value)) + elif not negates_key: + entry.missing.append((expected_key, expected_value)) + + for result_key, result_value in result.items(): + if result_key not in expected.keys(): + entry.extra.append((result_key, result_value)) diff --git a/lib/guessit/test/various.yml b/lib/guessit/test/various.yml new file mode 100644 index 00000000..6fb58deb --- /dev/null +++ b/lib/guessit/test/various.yml @@ -0,0 +1,1199 @@ +? Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv +: type: movie + title: Fear and Loathing in Las Vegas + year: 1998 + screen_size: 720p + source: HD-DVD + audio_codec: DTS + video_codec: H.264 + release_group: ESiR + +? Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi +: type: episode + title: Duckman + season: 1 + episode: 1 + episode_title: I, Duckman + date: 2002-11-07 + +? Series/Neverwhere/Neverwhere.05.Down.Street.[tvu.org.ru].avi +: type: episode + title: Neverwhere + episode: 5 + episode_title: Down Street + website: tvu.org.ru + +? 
Neverwhere.05.Down.Street.[tvu.org.ru].avi +: type: episode + title: Neverwhere + episode: 5 + episode_title: Down Street + website: tvu.org.ru + +? Series/Breaking Bad/Minisodes/Breaking.Bad.(Minisodes).01.Good.Cop.Bad.Cop.WEBRip.XviD.avi +: type: episode + title: Breaking Bad + episode_format: Minisode + episode: 1 + episode_title: Good Cop Bad Cop + source: Web + other: Rip + video_codec: Xvid + +? Series/Kaamelott/Kaamelott - Livre V - Ep 23 - Le Forfait.avi +: type: episode + title: Kaamelott + episode: 23 + episode_title: Le Forfait + +? Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv +: type: movie + title: The Doors + year: 1991 + date: 2008-03-09 + source: Blu-ray + screen_size: 720p + audio_codec: Dolby Digital + video_codec: H.264 + release_group: HiS@SiLUHD + language: english + website: sharethefiles.com + +? Movies/M.A.S.H. (1970)/MASH.(1970).[Divx.5.02][Dual-Subtitulos][DVDRip].ogm +: type: movie + title: MASH + year: 1970 + video_codec: DivX + source: DVD + other: [Dual Audio, Rip] + +? the.mentalist.501.hdtv-lol.mp4 +: type: episode + title: the mentalist + season: 5 + episode: 1 + source: HDTV + release_group: lol + +? the.simpsons.2401.hdtv-lol.mp4 +: type: episode + title: the simpsons + season: 24 + episode: 1 + source: HDTV + release_group: lol + +? Homeland.S02E01.HDTV.x264-EVOLVE.mp4 +: type: episode + title: Homeland + season: 2 + episode: 1 + source: HDTV + video_codec: H.264 + release_group: EVOLVE + +? /media/Band_of_Brothers-e01-Currahee.mkv +: type: episode + title: Band of Brothers + episode: 1 + episode_title: Currahee + +? /media/Band_of_Brothers-x02-We_Stand_Alone_Together.mkv +: type: episode + title: Band of Brothers + bonus: 2 + bonus_title: We Stand Alone Together + +? /movies/James_Bond-f21-Casino_Royale-x02-Stunts.mkv +: type: movie + title: Casino Royale + film_title: James Bond + film: 21 + bonus: 2 + bonus_title: Stunts + +? 
/TV Shows/new.girl.117.hdtv-lol.mp4 +: type: episode + title: new girl + season: 1 + episode: 17 + source: HDTV + release_group: lol + +? The.Office.(US).1x03.Health.Care.HDTV.XviD-LOL.avi +: type: episode + title: The Office + country: US + season: 1 + episode: 3 + episode_title: Health Care + source: HDTV + video_codec: Xvid + release_group: LOL + +? The_Insider-(1999)-x02-60_Minutes_Interview-1996.mp4 +: type: movie + title: The Insider + year: 1999 + bonus: 2 + bonus_title: 60 Minutes Interview-1996 + +? OSS_117--Cairo,_Nest_of_Spies.mkv +: type: movie + title: OSS 117 + alternative_title: Cairo, Nest of Spies + +? Rush.._Beyond_The_Lighted_Stage-x09-Between_Sun_and_Moon-2002_Hartford.mkv +: type: movie + title: Rush Beyond The Lighted Stage + bonus: 9 + bonus_title: Between Sun and Moon + year: 2002 + +? House.Hunters.International.S56E06.720p.hdtv.x264.mp4 +: type: episode + title: House Hunters International + season: 56 + episode: 6 + screen_size: 720p + source: HDTV + video_codec: H.264 + +? White.House.Down.2013.1080p.BluRay.DTS-HD.MA.5.1.x264-PublicHD.mkv +: type: movie + title: White House Down + year: 2013 + screen_size: 1080p + source: Blu-ray + audio_codec: DTS-HD + audio_profile: Master Audio + video_codec: H.264 + release_group: PublicHD + audio_channels: "5.1" + +? White.House.Down.2013.1080p.BluRay.DTSHD.MA.5.1.x264-PublicHD.mkv +: type: movie + title: White House Down + year: 2013 + screen_size: 1080p + source: Blu-ray + audio_codec: DTS-HD + audio_profile: Master Audio + video_codec: H.264 + release_group: PublicHD + audio_channels: "5.1" + +? Hostages.S01E01.Pilot.for.Air.720p.WEB-DL.DD5.1.H.264-NTb.nfo +: type: episode + title: Hostages + episode_title: Pilot for Air + season: 1 + episode: 1 + screen_size: 720p + source: Web + audio_channels: "5.1" + video_codec: H.264 + audio_codec: Dolby Digital + release_group: NTb + +? 
Despicable.Me.2.2013.1080p.BluRay.x264-VeDeTT.nfo +: type: movie + title: Despicable Me 2 + year: 2013 + screen_size: 1080p + source: Blu-ray + video_codec: H.264 + release_group: VeDeTT + +? Le Cinquieme Commando 1971 SUBFORCED FRENCH DVDRiP XViD AC3 Bandix.mkv +: type: movie + audio_codec: Dolby Digital + source: DVD + other: Rip + release_group: Bandix + subtitle_language: French + title: Le Cinquieme Commando + video_codec: Xvid + year: 1971 + +? Le Seigneur des Anneaux - La Communauté de l'Anneau - Version Longue - BDRip.mkv +: type: movie + title: Le Seigneur des Anneaux + source: Blu-ray + other: Rip + +? La petite bande (Michel Deville - 1983) VF PAL MP4 x264 AAC.mkv +: type: movie + audio_codec: AAC + language: French + title: La petite bande + video_codec: H.264 + year: 1983 + other: PAL + +? Retour de Flammes (Gregor Schnitzler 2003) FULL DVD.iso +: type: movie + source: DVD + title: Retour de Flammes + type: movie + year: 2003 + +? A.Common.Title.Special.2014.avi +: type: movie + year: 2014 + title: A Common Title Special + +? A.Common.Title.2014.Special.avi +: type: episode + year: 2014 + title: A Common Title + episode_title: Special + episode_details: Special + +? A.Common.Title.2014.Special.Edition.avi +: type: movie + year: 2014 + title: A Common Title + edition: Special + +? Downton.Abbey.2013.Christmas.Special.HDTV.x264-FoV.mp4 +: type: episode + year: 2013 + title: Downton Abbey + episode_title: Christmas Special + video_codec: H.264 + release_group: FoV + source: HDTV + episode_details: Special + +? Doctor_Who_2013_Christmas_Special.The_Time_of_The_Doctor.HD +: type: episode + title: Doctor Who + other: HD + episode_details: Special + episode_title: Christmas Special The Time of The Doctor + year: 2013 + +? Doctor Who 2005 50th Anniversary Special The Day of the Doctor 3.avi +: type: episode + title: Doctor Who + episode_details: Special + episode_title: 50th Anniversary Special The Day of the Doctor 3 + year: 2005 + +? 
Robot Chicken S06-Born Again Virgin Christmas Special HDTV x264.avi +: type: episode + title: Robot Chicken + source: HDTV + season: 6 + episode_title: Born Again Virgin Christmas Special + video_codec: H.264 + episode_details: Special + +? Wicked.Tuna.S03E00.Head.To.Tail.Special.HDTV.x264-YesTV +: type: episode + title: Wicked Tuna + episode_title: Head To Tail Special + release_group: YesTV + season: 3 + episode: 0 + video_codec: H.264 + source: HDTV + episode_details: Special + +? The.Voice.UK.S03E12.HDTV.x264-C4TV +: episode: 12 + video_codec: H.264 + source: HDTV + title: The Voice + release_group: C4TV + season: 3 + country: United Kingdom + type: episode + +? /tmp/star.trek.9/star.trek.9.mkv +: type: movie + title: star trek 9 + +? star.trek.9.mkv +: type: movie + title: star trek 9 + +? FlexGet.S01E02.TheName.HDTV.xvid +: episode: 2 + source: HDTV + season: 1 + title: FlexGet + episode_title: TheName + type: episode + video_codec: Xvid + +? FlexGet.S01E02.TheName.HDTV.xvid +: episode: 2 + source: HDTV + season: 1 + title: FlexGet + episode_title: TheName + type: episode + video_codec: Xvid + +? some.series.S03E14.Title.Here.720p +: episode: 14 + screen_size: 720p + season: 3 + title: some series + episode_title: Title Here + type: episode + +? '[the.group] Some.Series.S03E15.Title.Two.720p' +: episode: 15 + release_group: the.group + screen_size: 720p + season: 3 + title: Some Series + episode_title: Title Two + type: episode + +? 'HD 720p: Some series.S03E16.Title.Three' +: episode: 16 + other: HD + screen_size: 720p + season: 3 + title: Some series + episode_title: Title Three + type: episode + +? Something.Season.2.1of4.Ep.Title.HDTV.torrent +: episode_count: 4 + episode: 1 + source: HDTV + season: 2 + title: Something + episode_title: Title + type: episode + container: torrent + +? Show-A (US) - Episode Title S02E09 hdtv +: country: US + episode: 9 + source: HDTV + season: 2 + title: Show-A + type: episode + +? 
Jack's.Show.S03E01.blah.1080p +: episode: 1 + screen_size: 1080p + season: 3 + title: Jack's Show + episode_title: blah + type: episode + +? FlexGet.epic +: title: FlexGet epic + type: movie + +? FlexGet.Apt.1 +: title: FlexGet Apt 1 + type: movie + +? FlexGet.aptitude +: title: FlexGet aptitude + type: movie + +? FlexGet.Step1 +: title: FlexGet Step1 + type: movie + +? Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720 * 432].avi +: source: DVD + other: Rip + screen_size: 720x432 + title: El Bosque Animado + video_codec: Xvid + year: 1987 + type: movie + +? Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi +: source: DVD + other: Rip + screen_size: 720x432 + title: El Bosque Animado + video_codec: Xvid + year: 1987 + type: movie + +? 2009.shoot.fruit.chan.multi.dvd9.pal +: source: DVD + language: mul + other: PAL + title: shoot fruit chan + type: movie + year: 2009 + +? 2009.shoot.fruit.chan.multi.dvd5.pal +: source: DVD + language: mul + other: PAL + title: shoot fruit chan + type: movie + year: 2009 + +? The.Flash.2014.S01E01.PREAIR.WEBRip.XviD-EVO.avi +: episode: 1 + source: Web + other: [Preair, Rip] + release_group: EVO + season: 1 + title: The Flash + type: episode + video_codec: Xvid + year: 2014 + +? Ice.Lake.Rebels.S01E06.Ice.Lake.Games.720p.HDTV.x264-DHD +: episode: 6 + source: HDTV + release_group: DHD + screen_size: 720p + season: 1 + title: Ice Lake Rebels + episode_title: Ice Lake Games + type: episode + video_codec: H.264 + +? The League - S06E10 - Epi Sexy.mkv +: episode: 10 + season: 6 + title: The League + episode_title: Epi Sexy + type: episode + +? Stay (2005) [1080p]/Stay.2005.1080p.BluRay.x264.YIFY.mp4 +: source: Blu-ray + release_group: YIFY + screen_size: 1080p + title: Stay + type: movie + video_codec: H.264 + year: 2005 + +? 
/media/live/A/Anger.Management.S02E82.720p.HDTV.X264-DIMENSION.mkv +: source: HDTV + release_group: DIMENSION + screen_size: 720p + title: Anger Management + type: episode + season: 2 + episode: 82 + video_codec: H.264 + +? "[Figmentos] Monster 34 - At the End of Darkness [781219F1].mkv" +: type: episode + release_group: Figmentos + title: Monster + episode: 34 + episode_title: At the End of Darkness + crc32: 781219F1 + +? Game.of.Thrones.S05E07.720p.HDTV-KILLERS.mkv +: type: episode + episode: 7 + source: HDTV + release_group: KILLERS + screen_size: 720p + season: 5 + title: Game of Thrones + +? Game.of.Thrones.S05E07.HDTV.720p-KILLERS.mkv +: type: episode + episode: 7 + source: HDTV + release_group: KILLERS + screen_size: 720p + season: 5 + title: Game of Thrones + +? Parks and Recreation - [04x12] - Ad Campaign.avi +: type: episode + title: Parks and Recreation + season: 4 + episode: 12 + episode_title: Ad Campaign + +? Star Trek Into Darkness (2013)/star.trek.into.darkness.2013.720p.web-dl.h264-publichd.mkv +: type: movie + title: Star Trek Into Darkness + year: 2013 + screen_size: 720p + source: Web + video_codec: H.264 + release_group: publichd + +? /var/medias/series/The Originals/Season 02/The.Originals.S02E15.720p.HDTV.X264-DIMENSION.mkv +: type: episode + title: The Originals + season: 2 + episode: 15 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DIMENSION + +? Test.S01E01E07-FooBar-Group.avi +: container: avi + episode: + - 1 + - 7 + episode_title: FooBar-Group # Make sure it doesn't conflict with uuid + season: 1 + title: Test + type: episode + +? TEST.S01E02.2160p.NF.WEBRip.x264.DD5.1-ABC +: audio_channels: '5.1' + audio_codec: Dolby Digital + episode: 2 + source: Web + other: Rip + release_group: ABC + screen_size: 2160p + season: 1 + streaming_service: Netflix + title: TEST + type: episode + video_codec: H.264 + +? 
TEST.2015.12.30.720p.WEBRip.h264-ABC +: date: 2015-12-30 + source: Web + other: Rip + release_group: ABC + screen_size: 720p + title: TEST + type: episode + video_codec: H.264 + +? TEST.S01E10.24.1080p.NF.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 10 + episode_title: '24' + source: Web + other: Rip + release_group: ABC + screen_size: 1080p + season: 1 + streaming_service: Netflix + title: TEST + type: episode + video_codec: H.264 + +? TEST.S01E10.24.1080p.NF.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 10 + episode_title: '24' + source: Web + other: Rip + release_group: ABC + screen_size: 1080p + season: 1 + streaming_service: Netflix + title: TEST + type: episode + video_codec: H.264 + +? TEST.S01E10.24.1080p.NF.WEBRip.AAC.2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 10 + episode_title: '24' + source: Web + other: Rip + release_group: ABC + screen_size: 1080p + season: 1 + streaming_service: Netflix + title: TEST + type: episode + video_codec: H.264 + +? TEST.S05E02.720p.iP.WEBRip.AAC2.0.H264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 2 + source: Web + other: Rip + release_group: ABC + screen_size: 720p + season: 5 + title: TEST + type: episode + video_codec: H.264 + +? TEST.S03E07.720p.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 7 + source: Web + other: Rip + release_group: ABC + screen_size: 720p + season: 3 + title: TEST + type: episode + video_codec: H.264 + +? TEST.S15E15.24.1080p.FREE.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 15 + episode_title: '24' + source: Web + other: Rip + release_group: ABC + screen_size: 1080p + season: 15 + title: TEST + type: episode + video_codec: H.264 + +? 
TEST.S11E11.24.720p.ETV.WEBRip.AAC2.0.x264-ABC +: audio_channels: '2.0' + audio_codec: AAC + episode: 11 + episode_title: '24' + source: Web + other: Rip + release_group: ABC + screen_size: 720p + season: 11 + title: TEST + type: episode + video_codec: H.264 + +? TEST.2015.1080p.HC.WEBRip.x264.AAC2.0-ABC +: audio_channels: '2.0' + audio_codec: AAC + source: Web + other: Rip + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: H.264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-SBS.x264.DTS-HD.MA.7.1-ABC +: audio_channels: '7.1' + audio_codec: DTS-HD + audio_profile: Master Audio + source: Blu-ray + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: H.264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-OU.x264.DTS-HD.MA.7.1-ABC +: audio_channels: '7.1' + audio_codec: DTS-HD + audio_profile: Master Audio + source: Blu-ray + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: H.264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-OU.x264.DTS-HD.MA.TrueHD.7.1.Atmos-ABC +: audio_channels: '7.1' + audio_codec: + - DTS-HD + - Dolby TrueHD + - Dolby Atmos + audio_profile: Master Audio + source: Blu-ray + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: H.264 + year: 2015 + +? TEST.2015.1080p.3D.BluRay.Half-SBS.x264.DTS-HD.MA.TrueHD.7.1.Atmos-ABC +: audio_channels: '7.1' + audio_codec: + - DTS-HD + - Dolby TrueHD + - Dolby Atmos + audio_profile: Master Audio + source: Blu-ray + other: 3D + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + video_codec: H.264 + year: 2015 + +? TEST.2015.1080p.BluRay.REMUX.AVC.DTS-HD.MA.TrueHD.7.1.Atmos-ABC +: audio_channels: '7.1' + audio_codec: + - DTS-HD + - Dolby TrueHD + - Dolby Atmos + audio_profile: Master Audio + source: Blu-ray + other: Remux + release_group: ABC + screen_size: 1080p + title: TEST + type: movie + year: 2015 + +? 
Gangs of New York 2002 REMASTERED 1080p BluRay x264-AVCHD +: source: Blu-ray + edition: Remastered + screen_size: 1080p + title: Gangs of New York + type: movie + video_codec: H.264 + video_profile: Advanced Video Codec High Definition + year: 2002 + +? Peep.Show.S06E02.DVDrip.x264-faks86.mkv +: container: mkv + episode: 2 + source: DVD + other: Rip + release_group: faks86 + season: 6 + title: Peep Show + type: episode + video_codec: H.264 + +# Episode title is indeed 'October 8, 2014' +# https://thetvdb.com/?tab=episode&seriesid=82483&seasonid=569935&id=4997362&lid=7 +? The Soup - 11x41 - October 8, 2014.mp4 +: container: mp4 + episode: 41 + episode_title: October 8, 2014 + season: 11 + title: The Soup + type: episode + +? Red.Rock.S02E59.WEB-DLx264-JIVE +: episode: 59 + season: 2 + source: Web + release_group: JIVE + title: Red Rock + type: episode + video_codec: H.264 + +? Pawn.Stars.S12E31.Deals.On.Wheels.PDTVx264-JIVE +: episode: 31 + episode_title: Deals On Wheels + season: 12 + source: Digital TV + release_group: JIVE + title: Pawn Stars + type: episode + video_codec: H.264 + +? Duck.Dynasty.S09E09.Van.He-llsing.HDTVx264-JIVE +: episode: 9 + episode_title: Van He-llsing + season: 9 + source: HDTV + release_group: JIVE + title: Duck Dynasty + type: episode + video_codec: H.264 + +? ATKExotics.16.01.24.Ava.Alba.Watersports.XXX.1080p.MP4-KTR +: title: ATKExotics + episode_title: Ava Alba Watersports + other: XXX + screen_size: 1080p + container: mp4 + release_group: KTR + type: episode + +? PutaLocura.15.12.22.Spanish.Luzzy.XXX.720p.MP4-oRo +: title: PutaLocura + episode_title: Spanish Luzzy + other: XXX + screen_size: 720p + container: mp4 + release_group: oRo + type: episode + +? French Maid Services - Lola At Your Service WEB-DL SPLIT SCENES MP4-RARBG +: title: French Maid Services + alternative_title: Lola At Your Service + source: Web + container: mp4 + release_group: RARBG + type: movie + +? 
French Maid Services - Lola At Your Service - Marc Dorcel WEB-DL SPLIT SCENES MP4-RARBG +: title: French Maid Services + alternative_title: [Lola At Your Service, Marc Dorcel] + source: Web + container: mp4 + release_group: RARBG + type: movie + +? PlayboyPlus.com_16.01.23.Eleni.Corfiate.Playboy.Romania.XXX.iMAGESET-OHRLY +: episode_title: Eleni Corfiate Playboy Romania + other: XXX + type: episode + +? TeenPornoPass - Anna - Beautiful Ass Deep Penetrated 720p mp4 +: title: TeenPornoPass + alternative_title: + - Anna + - Beautiful Ass Deep Penetrated + screen_size: 720p + container: mp4 + type: movie + +? SexInJeans.Gina.Gerson.Super.Nasty.Asshole.Pounding.With.Gina.In.Jeans.A.Devil.In.Denim.The.Finest.Ass.Fuck.Frolicking.mp4 +: title: SexInJeans Gina Gerson Super Nasty Asshole Pounding With Gina In Jeans A Devil In Denim The Finest Ass Fuck Frolicking + container: mp4 + type: movie + +? TNA Impact Wrestling HDTV 2017-06-22 720p H264 AVCHD-SC-SDH +: title: TNA Impact Wrestling + source: HDTV + date: 2017-06-22 + screen_size: 720p + video_codec: H.264 + video_profile: + - Advanced Video Codec High Definition + - Scalable Video Coding + release_group: SDH + type: episode + +? Katy Perry - Pepsi & Billboard Summer Beats Concert Series 2012 1080i HDTV 20 Mbps DD2.0 MPEG2-TrollHD.ts +: title: Katy Perry + alternative_title: Pepsi & Billboard Summer Beats Concert Series + year: 2012 + screen_size: 1080i + source: HDTV + video_bit_rate: 20Mbps + audio_codec: Dolby Digital + audio_channels: '2.0' + video_codec: MPEG-2 + release_group: TrollHD + container: ts + +? Justin Timberlake - MTV Video Music Awards 2013 1080i 32 Mbps DTS-HD 5.1.ts +: title: Justin Timberlake + alternative_title: MTV Video Music Awards + year: 2013 + screen_size: 1080i + video_bit_rate: 32Mbps + audio_codec: DTS-HD + audio_channels: '5.1' + container: ts + type: movie + +? 
Chuck Berry The Very Best Of Chuck Berry(2010)[320 Kbps] +: title: Chuck Berry The Very Best Of Chuck Berry + year: 2010 + audio_bit_rate: 320Kbps + type: movie + +? Title Name [480p][1.5Mbps][.mp4] +: title: Title Name + screen_size: 480p + video_bit_rate: 1.5Mbps + container: mp4 + type: movie + +? This.is.Us +: options: --no-default-config + title: This is Us + type: movie + +? This.is.Us +: options: --excludes country + title: This is Us + type: movie + +? MotoGP.2016x03.USA.Race.BTSportHD.1080p25 +: title: MotoGP + season: 2016 + year: 2016 + episode: 3 + screen_size: 1080p + frame_rate: 25fps + type: episode + +? BBC.Earth.South.Pacific.2010.D2.1080p.24p.BD25.DTS-HD +: title: BBC Earth South Pacific + year: 2010 + screen_size: 1080p + frame_rate: 24fps + source: Blu-ray + audio_codec: DTS-HD + type: movie + +? Mr Robot - S03E01 - eps3 0 power-saver-mode h (1080p AMZN WEB-DL x265 HEVC 10bit EAC3 6.0 RCVR).mkv +: title: Mr Robot + season: 3 + episode: 1 + episode_title: eps3 0 power-saver-mode h + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + video_codec: H.265 + video_profile: High Efficiency Video Coding + color_depth: 10-bit + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + release_group: RCVR + container: mkv + type: episode + +? Panorama.15-05-2018.Web-DL.540p.H264.AAC.Subs.mp4 +: title: Panorama + date: 2018-05-15 + source: Web + screen_size: 540p + video_codec: H.264 + audio_codec: AAC + subtitle_language: und + container: mp4 + type: episode + +? Shaolin 2011.720p.BluRay.x264-x0r.mkv +: title: Shaolin + year: 2011 + screen_size: 720p + source: Blu-ray + video_codec: H.264 + release_group: x0r + container: mkv + type: movie + +? 
'[ Engineering Catastrophes S02E10 1080p AMZN WEB-DL DD+ 2.0 x264-TrollHD ]' +: title: Engineering Catastrophes + season: 2 + episode: 10 + screen_size: 1080p + streaming_service: Amazon Prime + source: Web + audio_codec: Dolby Digital Plus + audio_channels: '2.0' + video_codec: H.264 + release_group: TrollHD + type: episode + +? A Very Harold & Kumar 3D Christmas (2011).mkv +: title: A Very Harold & Kumar 3D Christmas + year: 2011 + container: mkv + type: movie + +? Cleveland.Hustles.S01E03.Downward.Dogs.and.Proper.Pigs.720p.HDTV.x264-W4F +: title: Cleveland Hustles + season: 1 + episode: 3 + episode_title: Downward Dogs and Proper Pigs + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: W4F + type: episode + +? Pawn.Stars.S12E20.The.Pawn.Awakens.REAL.READ.NFO.720p.HDTV.x264-DHD +: title: Pawn Stars + season: 12 + episode: 20 + episode_title: The Pawn Awakens + other: + - Proper + - Read NFO + proper_count: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DHD + type: episode + +? Pawn.Stars.S12E22.Racing.Revolution.REAL.720p.HDTV.x264-DHD +: title: Pawn Stars + season: 12 + episode: 22 + episode_title: Racing Revolution + other: Proper + proper_count: 2 + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DHD + type: episode + +? Luksusfellen.S18E02.REAL.NORWEGiAN.720p.WEB.h264-NORPiLT +: title: Luksusfellen + season: 18 + episode: 2 + other: Proper + proper_count: 2 + language: Norwegian + screen_size: 720p + source: Web + video_codec: H.264 + release_group: NORPiLT + type: episode + +? The.Exorcist.S02E07.REAL.FRENCH.720p.HDTV.x264-SH0W +: title: The Exorcist + season: 2 + episode: 7 + other: Proper + proper_count: 2 + language: fr + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: SH0W + type: episode + +? 
Outrageous.Acts.of.Science.S05E02.Is.This.for.Real.720p.HDTV.x264-DHD +: title: Outrageous Acts of Science + season: 5 + episode: 2 +# corner case +# episode_title: Is This for Real + screen_size: 720p + source: HDTV + video_codec: H.264 + release_group: DHD + type: episode + +? How.the.Universe.Works.S06E08.Strange.Lives.of.Dwarf.Planets.REAL.720p.WEB.x264-DHD +: title: How the Universe Works + season: 6 + episode: 8 + episode_title: Strange Lives of Dwarf Planets + other: Proper + proper_count: 2 + screen_size: 720p + source: Web + video_codec: H.264 + release_group: DHD + type: episode + +? Vampirina.S01E16.REAL.HDTV.x264-W4F +: title: Vampirina + season: 1 + episode: 16 + other: Proper + proper_count: 2 + source: HDTV + video_codec: H.264 + release_group: W4F + type: episode + +? Test.S01E16.Some Real Episode Title.HDTV.x264-W4F +: title: Test + season: 1 + episode: 16 + episode_title: Some Real Episode Title + source: HDTV + video_codec: H.264 + release_group: W4F + type: episode + +? NOS4A2.S01E01.The.Shorter.Way.REPACK.720p.AMZN.WEB-DL.DDP5.1.H.264-NTG.mkv +: title: NOS4A2 + season: 1 + episode: 1 + episode_title: The Shorter Way + other: Proper + proper_count: 1 + screen_size: 720p + streaming_service: Amazon Prime + source: Web + audio_codec: Dolby Digital Plus + audio_channels: '5.1' + video_codec: H.264 + release_group: NTG + container: mkv + type: episode + +? Star Trek DS9 Ep 2x03 The Siege (Part III) +: title: Star Trek DS9 + season: 2 + episode: 3 + episode_title: The Siege + part: 3 + type: episode + +? The.Red.Line.S01E01 +: title: The Red Line + season: 1 + episode: 1 + type: episode + +? Show.S01E01.WEB.x264-METCON.mkv +: title: Show + season: 1 + episode: 1 + source: Web + video_codec: H.264 + release_group: METCON + container: mkv + type: episode + +? Show.S01E01.WEB.x264-TCMEON.mkv +: title: Show + season: 1 + episode: 1 + source: Web + video_codec: H.264 + release_group: TCMEON + container: mkv + type: episode + +? 
Show.S01E01.WEB.x264-MEONTC.mkv +: title: Show + season: 1 + episode: 1 + source: Web + video_codec: H.264 + release_group: MEONTC + container: mkv + type: episode + +? '[TorrentCouch.com].Westworld.S02.Complete.720p.WEB-DL.x264.[MP4].[5.3GB].[Season.2.Full]/[TorrentCouch.com].Westworld.S02E03.720p.WEB-DL.x264.mp4' +: website: TorrentCouch.com + title: Westworld + season: 2 + other: Complete + screen_size: 720p + source: Web + video_codec: H.264 + container: mp4 + size: 5.3GB + episode: 3 + type: episode + +? Vita.&.Virginia.2018.720p.H.264.YTS.LT.mp4 +: title: Vita & Virginia + year: 2018 + screen_size: 720p + video_codec: H.264 + release_group: YTS.LT + container: mp4 + type: movie \ No newline at end of file diff --git a/lib/guessit/tlds-alpha-by-domain.txt b/lib/guessit/tlds-alpha-by-domain.txt new file mode 100644 index 00000000..280c794c --- /dev/null +++ b/lib/guessit/tlds-alpha-by-domain.txt @@ -0,0 +1,341 @@ +# Version 2013112900, Last Updated Fri Nov 29 07:07:01 2013 UTC +AC +AD +AE +AERO +AF +AG +AI +AL +AM +AN +AO +AQ +AR +ARPA +AS +ASIA +AT +AU +AW +AX +AZ +BA +BB +BD +BE +BF +BG +BH +BI +BIKE +BIZ +BJ +BM +BN +BO +BR +BS +BT +BV +BW +BY +BZ +CA +CAMERA +CAT +CC +CD +CF +CG +CH +CI +CK +CL +CLOTHING +CM +CN +CO +COM +CONSTRUCTION +CONTRACTORS +COOP +CR +CU +CV +CW +CX +CY +CZ +DE +DIAMONDS +DIRECTORY +DJ +DK +DM +DO +DZ +EC +EDU +EE +EG +ENTERPRISES +EQUIPMENT +ER +ES +ESTATE +ET +EU +FI +FJ +FK +FM +FO +FR +GA +GALLERY +GB +GD +GE +GF +GG +GH +GI +GL +GM +GN +GOV +GP +GQ +GR +GRAPHICS +GS +GT +GU +GURU +GW +GY +HK +HM +HN +HOLDINGS +HR +HT +HU +ID +IE +IL +IM +IN +INFO +INT +IO +IQ +IR +IS +IT +JE +JM +JO +JOBS +JP +KE +KG +KH +KI +KITCHEN +KM +KN +KP +KR +KW +KY +KZ +LA +LAND +LB +LC +LI +LIGHTING +LK +LR +LS +LT +LU +LV +LY +MA +MC +MD +ME +MG +MH +MIL +MK +ML +MM +MN +MO +MOBI +MP +MQ +MR +MS +MT +MU +MUSEUM +MV +MW +MX +MY +MZ +NA +NAME +NC +NE +NET +NF +NG +NI +NL +NO +NP +NR +NU +NZ +OM +ORG +PA +PE +PF +PG +PH +PHOTOGRAPHY +PK +PL +PLUMBING +PM 
+PN +POST +PR +PRO +PS +PT +PW +PY +QA +RE +RO +RS +RU +RW +SA +SB +SC +SD +SE +SEXY +SG +SH +SI +SINGLES +SJ +SK +SL +SM +SN +SO +SR +ST +SU +SV +SX +SY +SZ +TATTOO +TC +TD +TECHNOLOGY +TEL +TF +TG +TH +TIPS +TJ +TK +TL +TM +TN +TO +TODAY +TP +TR +TRAVEL +TT +TV +TW +TZ +UA +UG +UK +US +UY +UZ +VA +VC +VE +VENTURES +VG +VI +VN +VOYAGE +VU +WF +WS +XN--3E0B707E +XN--45BRJ9C +XN--80AO21A +XN--80ASEHDB +XN--80ASWG +XN--90A3AC +XN--CLCHC0EA0B2G2A9GCD +XN--FIQS8S +XN--FIQZ9S +XN--FPCRJ9C3D +XN--FZC2C9E2C +XN--GECRJ9C +XN--H2BRJ9C +XN--J1AMH +XN--J6W193G +XN--KPRW13D +XN--KPRY57D +XN--L1ACC +XN--LGBBAT1AD8J +XN--MGB9AWBF +XN--MGBA3A4F16A +XN--MGBAAM7A8H +XN--MGBAYH7GPA +XN--MGBBH1A71E +XN--MGBC0A9AZCG +XN--MGBERP4A5D4AR +XN--MGBX4CD0AB +XN--NGBC5AZD +XN--O3CW4H +XN--OGBPF8FL +XN--P1AI +XN--PGBS0DH +XN--Q9JYB4C +XN--S9BRJ9C +XN--UNUP4Y +XN--WGBH1C +XN--WGBL6A +XN--XKC2AL3HYE2A +XN--XKC2DL3A5EE0H +XN--YFRO4I67O +XN--YGBI2AMMX +XXX +YE +YT +ZA +ZM +ZW diff --git a/lib/guessit/yamlutils.py b/lib/guessit/yamlutils.py new file mode 100644 index 00000000..d04be641 --- /dev/null +++ b/lib/guessit/yamlutils.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Options +""" + +try: + from collections import OrderedDict +except ImportError: # pragma: no-cover + from ordereddict import OrderedDict # pylint:disable=import-error +import babelfish + +import yaml # pylint:disable=wrong-import-order + +from .rules.common.quantity import BitRate, FrameRate, Size + + +class OrderedDictYAMLLoader(yaml.SafeLoader): + """ + A YAML loader that loads mappings into ordered dictionaries. 
+ From https://gist.github.com/enaeseth/844388 + """ + + def __init__(self, *args, **kwargs): + yaml.SafeLoader.__init__(self, *args, **kwargs) + + self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map) + self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map) + + def construct_yaml_map(self, node): + data = OrderedDict() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_mapping(self, node, deep=False): + if isinstance(node, yaml.MappingNode): + self.flatten_mapping(node) + else: # pragma: no cover + raise yaml.constructor.ConstructorError(None, None, + 'expected a mapping node, but found %s' % node.id, node.start_mark) + + mapping = OrderedDict() + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError as exc: # pragma: no cover + raise yaml.constructor.ConstructorError('while constructing a mapping', + node.start_mark, 'found unacceptable key (%s)' + % exc, key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + +class CustomDumper(yaml.SafeDumper): + """ + Custom YAML Dumper. 
+ """ + pass # pylint:disable=unnecessary-pass + + +def default_representer(dumper, data): + """Default representer""" + return dumper.represent_str(str(data)) + + +CustomDumper.add_representer(babelfish.Language, default_representer) +CustomDumper.add_representer(babelfish.Country, default_representer) +CustomDumper.add_representer(BitRate, default_representer) +CustomDumper.add_representer(FrameRate, default_representer) +CustomDumper.add_representer(Size, default_representer) + + +def ordered_dict_representer(dumper, data): + """OrderedDict representer""" + return dumper.represent_mapping('tag:yaml.org,2002:map', data.items()) + + +CustomDumper.add_representer(OrderedDict, ordered_dict_representer) diff --git a/lib/libtorrent.pyd b/lib/libtorrent.pyd deleted file mode 100644 index 1068c833..00000000 Binary files a/lib/libtorrent.pyd and /dev/null differ diff --git a/lib/pyparsing.py b/lib/pyparsing.py new file mode 100644 index 00000000..581d5bbb --- /dev/null +++ b/lib/pyparsing.py @@ -0,0 +1,7107 @@ +# -*- coding: utf-8 -*- +# module pyparsing.py +# +# Copyright (c) 2003-2019 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +__doc__ = \ +""" +pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= + +The pyparsing module is an alternative approach to creating and +executing simple grammars, vs. the traditional lex/yacc approach, or the +use of regular expressions. With pyparsing, you don't need to learn +a new syntax for defining grammars or matching expressions - the parsing +module provides a library of classes that you use to construct the +grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form +``"<salutation>, <addressee>!"``), built up using :class:`Word`, +:class:`Literal`, and :class:`And` elements +(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions, +and the strings are auto-converted to :class:`Literal` expressions):: + + from pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word(alphas) + "," + Word(alphas) + "!" + + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the +self-explanatory class names, and the use of '+', '|' and '^' operators. + +The :class:`ParseResults` object returned from +:class:`ParserElement.parseString` can be +accessed as a nested list, a dictionary, or an object with named +attributes. 
+ +The pyparsing module handles some of the problems that are typically +vexing when writing text parsers: + + - extra or missing whitespace (the above program will also handle + "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments + + +Getting Started - +----------------- +Visit the classes :class:`ParserElement` and :class:`ParseResults` to +see the base classes that most other pyparsing +classes inherit from. Use the docstrings for examples of how to: + + - construct literal match expressions from :class:`Literal` and + :class:`CaselessLiteral` classes + - construct character word-group expressions using the :class:`Word` + class + - see how to create repetitive expressions using :class:`ZeroOrMore` + and :class:`OneOrMore` classes + - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`, + and :class:`'&'<Each>` operators to combine simple expressions into + more complex ones + - associate names with your parsed results using + :class:`ParserElement.setResultsName` + - access the parsed data, which is returned as a :class:`ParseResults` + object + - find some helpful expression short-cuts like :class:`delimitedList` + and :class:`oneOf` + - find more useful common expressions in the :class:`pyparsing_common` + namespace class +""" + +__version__ = "2.4.7" +__versionTime__ = "30 Mar 2020 00:43 UTC" +__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" + +import string +from weakref import ref as wkref +import copy +import sys +import warnings +import re +import sre_constants +import collections +import pprint +import traceback +import types +from datetime import datetime +from operator import itemgetter +import itertools +from functools import wraps +from contextlib import contextmanager + +try: + # Python 3 + from itertools import filterfalse +except ImportError: + from itertools import ifilterfalse as filterfalse + +try: + from _thread import RLock +except ImportError: + from threading import RLock + +try: + # 
Python 3 + from collections.abc import Iterable + from collections.abc import MutableMapping, Mapping +except ImportError: + # Python 2.7 + from collections import Iterable + from collections import MutableMapping, Mapping + +try: + from collections import OrderedDict as _OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict as _OrderedDict + except ImportError: + _OrderedDict = None + +try: + from types import SimpleNamespace +except ImportError: + class SimpleNamespace: pass + +# version compatibility configuration +__compat__ = SimpleNamespace() +__compat__.__doc__ = """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an And expression is nested within an Or or MatchFirst; set to + True to enable bugfix released in pyparsing 2.3.0, or False to preserve + pre-2.3.0 handling of named results +""" +__compat__.collect_all_And_tokens = True + +__diag__ = SimpleNamespace() +__diag__.__doc__ = """ +Diagnostic configuration (all default to False) + - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results + name is defined on a MatchFirst or Or expression with one or more And subexpressions + (only warns if __compat__.collect_all_And_tokens is False) + - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined + with a results name, but has no contents defined + - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is + incorrectly called with multiple str 
arguments + - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent + calls to ParserElement.setName() +""" +__diag__.warn_multiple_tokens_in_named_alternation = False +__diag__.warn_ungrouped_named_tokens_in_collection = False +__diag__.warn_name_set_on_empty_Forward = False +__diag__.warn_on_multiple_string_args_to_oneof = False +__diag__.enable_debug_on_named_expressions = False +__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")] + +def _enable_all_warnings(): + __diag__.warn_multiple_tokens_in_named_alternation = True + __diag__.warn_ungrouped_named_tokens_in_collection = True + __diag__.warn_name_set_on_empty_Forward = True + __diag__.warn_on_multiple_string_args_to_oneof = True +__diag__.enable_all_warnings = _enable_all_warnings + + +__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__', + 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', + 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', + 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', + 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', + 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', + 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', + 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char', + 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', + 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', + 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', + 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', + 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 
'matchPreviousLiteral', + 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', + 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', + 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', + 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', + 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass', + 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set', + 'conditionAsParseAction', 're', + ] + +system_version = tuple(sys.version_info)[:3] +PY_3 = system_version[0] == 3 +if PY_3: + _MAX_INT = sys.maxsize + basestring = str + unichr = chr + unicode = str + _ustr = str + + # build list of single arg builtins, that can be used as parse actions + singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] + +else: + _MAX_INT = sys.maxint + range = xrange + + def _ustr(obj): + """Drop-in replacement for str(obj) that tries to be Unicode + friendly. It first tries str(obj). If that fails with + a UnicodeEncodeError, then it tries unicode(obj). It then + < returns the unicode object | encodes it with the default + encoding | ... >. + """ + if isinstance(obj, unicode): + return obj + + try: + # If this works, then _ustr(obj) has the same behaviour as str(obj), so + # it won't break any existing code. 
+ return str(obj) + + except UnicodeEncodeError: + # Else encode it + ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') + xmlcharref = Regex(r'&#\d+;') + xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) + return xmlcharref.transformString(ret) + + # build list of single arg builtins, tolerant of Python version, that can be used as parse actions + singleArgBuiltins = [] + import __builtin__ + + for fname in "sum len sorted reversed list tuple set any all min max".split(): + try: + singleArgBuiltins.append(getattr(__builtin__, fname)) + except AttributeError: + continue + +_generatorType = type((y for y in range(1))) + +def _xml_escape(data): + """Escape &, <, >, ", ', etc. in a string of data.""" + + # ampersand must be replaced first + from_symbols = '&><"\'' + to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split()) + for from_, to_ in zip(from_symbols, to_symbols): + data = data.replace(from_, to_) + return data + +alphas = string.ascii_uppercase + string.ascii_lowercase +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +_bslash = chr(92) +printables = "".join(c for c in string.printable if c not in string.whitespace) + + +def conditionAsParseAction(fn, message=None, fatal=False): + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__(self, pstr, loc=0, msg=None, elem=None): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parserElement = elem + self.args = (pstr, loc, msg) 
+ + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + def __getattr__(self, aname): + """supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + """ + if aname == "lineno": + return lineno(self.loc, self.pstr) + elif aname in ("col", "column"): + return col(self.loc, self.pstr) + elif aname == "line": + return line(self.loc, self.pstr) + else: + raise AttributeError(aname) + + def __str__(self): + if self.pstr: + if self.loc >= len(self.pstr): + foundstr = ', found end of text' + else: + foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\') + else: + foundstr = '' + return ("%s%s (at char %d), (line:%d, col:%d)" % + (self.msg, foundstr, self.loc, self.lineno, self.column)) + def __repr__(self): + return _ustr(self) + def markInputline(self, markerString=">!<"): + """Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. 
+ """ + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join((line_str[:line_column], + markerString, line_str[line_column:])) + return line_str.strip() + def __dir__(self): + return "lineno col line".split() + dir(type(self)) + +class ParseException(ParseBaseException): + """ + Exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + + Example:: + + try: + Word(nums).setName("integer").parseString("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.col)) + + prints:: + + Expected integer (at char 0), (line:1, col:1) + column: 1 + + """ + + @staticmethod + def explain(exc, depth=16): + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `setName` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + explain() is only supported under Python 3. 
+ """ + import inspect + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(' ' * (exc.col - 1) + '^') + ret.append("{0}: {1}".format(type(exc).__name__, exc)) + + if depth > 0: + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for i, ff in enumerate(callers[-depth:]): + frm = ff[0] + + f_self = frm.f_locals.get('self', None) + if isinstance(f_self, ParserElement): + if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'): + continue + if f_self in seen: + continue + seen.add(f_self) + + self_type = type(f_self) + ret.append("{0}.{1} - {2}".format(self_type.__module__, + self_type.__name__, + f_self)) + elif f_self is not None: + self_type = type(f_self) + ret.append("{0}.{1}".format(self_type.__module__, + self_type.__name__)) + else: + code = frm.f_code + if code.co_name in ('wrapper', '<module>'): + continue + + ret.append("{0}".format(code.co_name)) + + depth -= 1 + if not depth: + break + + return '\n'.join(ret) + + +class ParseFatalException(ParseBaseException): + """user-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately""" + pass + +class ParseSyntaxException(ParseFatalException): + """just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + pass + +#~ class ReparseException(ParseBaseException): + #~ """Experimental class - parse actions can raise this exception to cause + #~ pyparsing to reparse the input string: + #~ - with a modified input string, and/or + #~ - with a modified start location + #~ Set the values of the ReparseException in the constructor, and raise the + #~ exception in a parse action to cause pyparsing to use the new string/location. 
+ #~ Setting the values as None causes no change to be made. + #~ """ + #~ def __init_( self, newstring, restartLoc ): + #~ self.newParseText = newstring + #~ self.reparseLoc = restartLoc + +class RecursiveGrammarException(Exception): + """exception thrown by :class:`ParserElement.validate` if the + grammar could be improperly recursive + """ + def __init__(self, parseElementList): + self.parseElementTrace = parseElementList + + def __str__(self): + return "RecursiveGrammarException: %s" % self.parseElementTrace + +class _ParseResultsWithOffset(object): + def __init__(self, p1, p2): + self.tup = (p1, p2) + def __getitem__(self, i): + return self.tup[i] + def __repr__(self): + return repr(self.tup[0]) + def setOffset(self, i): + self.tup = (self.tup[0], i) + +class ParseResults(object): + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) + - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`) + + Example:: + + integer = Word(nums) + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + # equivalent form: + # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + # parseString returns a ParseResults object + result = date_str.parseString("1999/12/31") + + def test(s, fn=repr): + print("%s -> %s" % (s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + + prints:: + + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + def __new__(cls, toklist=None, name=None, 
asList=True, modal=True): + if isinstance(toklist, cls): + return toklist + retobj = object.__new__(cls) + retobj.__doinit = True + return retobj + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance): + if self.__doinit: + self.__doinit = False + self.__name = None + self.__parent = None + self.__accumNames = {} + self.__asList = asList + self.__modal = modal + if toklist is None: + toklist = [] + if isinstance(toklist, list): + self.__toklist = toklist[:] + elif isinstance(toklist, _generatorType): + self.__toklist = list(toklist) + else: + self.__toklist = [toklist] + self.__tokdict = dict() + + if name is not None and name: + if not modal: + self.__accumNames[name] = 0 + if isinstance(name, int): + name = _ustr(name) # will always return a str, but use _ustr for consistency + self.__name = name + if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])): + if isinstance(toklist, basestring): + toklist = [toklist] + if asList: + if isinstance(toklist, ParseResults): + self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0) + self[name].__name = name + else: + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + self[name] = toklist + + def __getitem__(self, i): + if isinstance(i, (int, slice)): + return self.__toklist[i] + else: + if i not in self.__accumNames: + return self.__tokdict[i][-1][0] + else: + return ParseResults([v[0] for v in self.__tokdict[i]]) + + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k, list()) + [v] + sub = v[0] + elif isinstance(k, (int, slice)): + self.__toklist[k] = v + sub = v + else: + self.__tokdict[k] = self.__tokdict.get(k, list()) + 
[_ParseResultsWithOffset(v, 0)] + sub = v + if isinstance(sub, ParseResults): + sub.__parent = wkref(self) + + def __delitem__(self, i): + if isinstance(i, (int, slice)): + mylen = len(self.__toklist) + del self.__toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name, occurrences in self.__tokdict.items(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) + else: + del self.__tokdict[i] + + def __contains__(self, k): + return k in self.__tokdict + + def __len__(self): + return len(self.__toklist) + + def __bool__(self): + return (not not self.__toklist) + __nonzero__ = __bool__ + + def __iter__(self): + return iter(self.__toklist) + + def __reversed__(self): + return iter(self.__toklist[::-1]) + + def _iterkeys(self): + if hasattr(self.__tokdict, "iterkeys"): + return self.__tokdict.iterkeys() + else: + return iter(self.__tokdict) + + def _itervalues(self): + return (self[k] for k in self._iterkeys()) + + def _iteritems(self): + return ((k, self[k]) for k in self._iterkeys()) + + if PY_3: + keys = _iterkeys + """Returns an iterator of all named result keys.""" + + values = _itervalues + """Returns an iterator of all named result values.""" + + items = _iteritems + """Returns an iterator of all named result key-value tuples.""" + + else: + iterkeys = _iterkeys + """Returns an iterator of all named result keys (Python 2.x only).""" + + itervalues = _itervalues + """Returns an iterator of all named result values (Python 2.x only).""" + + iteritems = _iteritems + """Returns an iterator of all named result key-value tuples (Python 2.x only).""" + + def keys(self): + """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" + return 
list(self.iterkeys()) + + def values(self): + """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" + return list(self.itervalues()) + + def items(self): + """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" + return list(self.iteritems()) + + def haskeys(self): + """Since keys() returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self.__tokdict) + + def pop(self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. 
+ + Example:: + + def remove_first(tokens): + tokens.pop(0) + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parseString("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.addParseAction(remove_LABEL) + print(patt.parseString("AAB 123 321").dump()) + + prints:: + + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k, v in kwargs.items(): + if k == 'default': + args = (args[0], v) + else: + raise TypeError("pop() got an unexpected keyword argument '%s'" % k) + if (isinstance(args[0], int) + or len(args) == 1 + or args[0] in self): + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, defaultValue=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``defaultValue`` or ``None`` if no + ``defaultValue`` is specified. + + Similar to ``dict.get()``. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return defaultValue + + def insert(self, index, insStr): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. 
+ + Example:: + + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] + """ + self.__toklist.insert(index, insStr) + # fixup indices in token dictionary + for name, occurrences in self.__tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) + + def append(self, item): + """ + Add single element to end of ParseResults list of elements. + + Example:: + + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] + """ + self.__toklist.append(item) + + def extend(self, itemseq): + """ + Add sequence of elements to end of ParseResults list of elements. + + Example:: + + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self.__iadd__(itemseq) + else: + self.__toklist.extend(itemseq) + + def clear(self): + """ + Clear all elements and results names. 
+ """ + del self.__toklist[:] + self.__tokdict.clear() + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + return "" + + def __add__(self, other): + ret = self.copy() + ret += other + return ret + + def __iadd__(self, other): + if other.__tokdict: + offset = len(self.__toklist) + addoffset = lambda a: offset if a < 0 else a + offset + otheritems = other.__tokdict.items() + otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems for v in vlist] + for k, v in otherdictitems: + self[k] = v + if isinstance(v[0], ParseResults): + v[0].__parent = wkref(self) + + self.__toklist += other.__toklist + self.__accumNames.update(other.__accumNames) + return self + + def __radd__(self, other): + if isinstance(other, int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__(self): + return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict)) + + def __str__(self): + return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' + + def _asStringList(self, sep=''): + out = [] + for item in self.__toklist: + if out and sep: + out.append(sep) + if isinstance(item, ParseResults): + out += item._asStringList() + else: + out.append(_ustr(item)) + return out + + def asList(self): + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. 
+ + Example:: + + patt = OneOrMore(Word(alphas)) + result = patt.parseString("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] + + # Use asList() to create an actual list + result_list = result.asList() + print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist] + + def asDict(self): + """ + Returns the named parse results as a nested dictionary. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.asDict() + print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + if PY_3: + item_fn = self.items + else: + item_fn = self.iteritems + + def toItem(obj): + if isinstance(obj, ParseResults): + if obj.haskeys(): + return obj.asDict() + else: + return [toItem(v) for v in obj] + else: + return obj + + return dict((k, toItem(v)) for k, v in item_fn()) + + def copy(self): + """ + Returns a new copy of a :class:`ParseResults` object. 
+ """ + ret = ParseResults(self.__toklist) + ret.__tokdict = dict(self.__tokdict.items()) + ret.__parent = self.__parent + ret.__accumNames.update(self.__accumNames) + ret.__name = self.__name + return ret + + def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True): + """ + (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. + """ + nl = "\n" + out = [] + namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items() + for v in vlist) + nextLevelIndent = indent + " " + + # collapse out indents if formatting is not desired + if not formatted: + indent = "" + nextLevelIndent = "" + nl = "" + + selfTag = None + if doctag is not None: + selfTag = doctag + else: + if self.__name: + selfTag = self.__name + + if not selfTag: + if namedItemsOnly: + return "" + else: + selfTag = "ITEM" + + out += [nl, indent, "<", selfTag, ">"] + + for i, res in enumerate(self.__toklist): + if isinstance(res, ParseResults): + if i in namedItems: + out += [res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + out += [res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + # individual token, see if there is a name for it + resTag = None + if i in namedItems: + resTag = namedItems[i] + if not resTag: + if namedItemsOnly: + continue + else: + resTag = "ITEM" + xmlBodyText = _xml_escape(_ustr(res)) + out += [nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">"] + + out += [nl, indent, "</", selfTag, ">"] + return "".join(out) + + def __lookup(self, sub): + for k, vlist in self.__tokdict.items(): + for v, loc in vlist: + if sub is v: + return k + return None + + def getName(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. 
+ + Example:: + + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parseString("22 111-22-3333 #221B") + for item in result: + print(item.getName(), ':', item[0]) + + prints:: + + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self.__name: + return self.__name + elif self.__parent: + par = self.__parent() + if par: + return par.__lookup(self) + else: + return None + elif (len(self) == 1 + and len(self.__tokdict) == 1 + and next(iter(self.__tokdict.values()))[0][1] in (0, -1)): + return next(iter(self.__tokdict.keys())) + else: + return None + + def dump(self, indent='', full=True, include_list=True, _depth=0): + """ + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data. 
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(result.dump()) + + prints:: + + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ + out = [] + NL = '\n' + if include_list: + out.append(indent + _ustr(self.asList())) + else: + out.append('') + + if full: + if self.haskeys(): + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: + if out: + out.append(NL) + out.append("%s%s- %s: " % (indent, (' ' * _depth), k)) + if isinstance(v, ParseResults): + if v: + out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1)) + else: + out.append(_ustr(v)) + else: + out.append(repr(v)) + elif any(isinstance(vv, ParseResults) for vv in self): + v = self + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent, + (' ' * (_depth)), + i, + indent, + (' ' * (_depth + 1)), + vv.dump(indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1))) + else: + out.append("\n%s%s[%d]:\n%s%s%s" % (indent, + (' ' * (_depth)), + i, + indent, + (' ' * (_depth + 1)), + _ustr(vv))) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the + `pprint <https://docs.python.org/3/library/pprint.html>`_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ . 
+ + Example:: + + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimitedList(term))) + result = func.parseString("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + + prints:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.asList(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return (self.__toklist, + (self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name)) + + def __setstate__(self, state): + self.__toklist = state[0] + self.__tokdict, par, inAccumNames, self.__name = state[1] + self.__accumNames = {} + self.__accumNames.update(inAccumNames) + if par is not None: + self.__parent = wkref(par) + else: + self.__parent = None + + def __getnewargs__(self): + return self.__toklist, self.__name, self.__asList, self.__modal + + def __dir__(self): + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None): + """ + Helper classmethod to construct a ParseResults from a dict, preserving the + name-value relations as results names. If an optional 'name' argument is + given, a nested ParseResults will be returned + """ + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + else: + if PY_3: + return not isinstance(obj, (str, bytes)) + else: + return not isinstance(obj, basestring) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret + +MutableMapping.register(ParseResults) + +def col (loc, strg): + """Returns current column within a string, counting newlines as line separators. + The first column is number 1. 
+ + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See + :class:`ParserElement.parseString` for more + information on parsing strings containing ``<TAB>`` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + """ + s = strg + return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) + +def lineno(loc, strg): + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note - the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`ParserElement.parseString` + for more information on parsing strings containing ``<TAB>`` s, and + suggested methods to maintain a consistent view of the parsed string, the + parse location, and line and column positions within the parsed string. + """ + return strg.count("\n", 0, loc) + 1 + +def line(loc, strg): + """Returns the line of text containing loc within a string, counting newlines as line separators. 
+ """ + lastCR = strg.rfind("\n", 0, loc) + nextCR = strg.find("\n", loc) + if nextCR >= 0: + return strg[lastCR + 1:nextCR] + else: + return strg[lastCR + 1:] + +def _defaultStartDebugAction(instring, loc, expr): + print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring)))) + +def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks): + print("Matched " + _ustr(expr) + " -> " + str(toks.asList())) + +def _defaultExceptionDebugAction(instring, loc, expr, exc): + print("Exception raised:" + _ustr(exc)) + +def nullDebugAction(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + pass + +# Only works on Python 3.x - nonlocal is toxic to Python 2 installs +#~ 'decorator to trim function calls to match the arity of the target' +#~ def _trim_arity(func, maxargs=3): + #~ if func in singleArgBuiltins: + #~ return lambda s,l,t: func(t) + #~ limit = 0 + #~ foundArity = False + #~ def wrapper(*args): + #~ nonlocal limit,foundArity + #~ while 1: + #~ try: + #~ ret = func(*args[limit:]) + #~ foundArity = True + #~ return ret + #~ except TypeError: + #~ if limit == maxargs or foundArity: + #~ raise + #~ limit += 1 + #~ continue + #~ return wrapper + +# this version is Python 2.x-3.x cross-compatible +'decorator to trim function calls to match the arity of the target' +def _trim_arity(func, maxargs=2): + if func in singleArgBuiltins: + return lambda s, l, t: func(t) + limit = [0] + foundArity = [False] + + # traceback return data structure changed in Py3.5 - normalize back to plain tuples + if system_version[:2] >= (3, 5): + def extract_stack(limit=0): + # special handling for Python 3.5.0 - extra deep call stack by 1 + offset = -3 if system_version == (3, 5, 0) else -2 + frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset] + return [frame_summary[:2]] + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = 
frames[-1] + return [frame_summary[:2]] + else: + extract_stack = traceback.extract_stack + extract_tb = traceback.extract_tb + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 6 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! + this_line = extract_stack(limit=2)[-1] + pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF) + + def wrapper(*args): + while 1: + try: + ret = func(*args[limit[0]:]) + foundArity[0] = True + return ret + except TypeError: + # re-raise TypeErrors if they did not come from our arity testing + if foundArity[0]: + raise + else: + try: + tb = sys.exc_info()[-1] + if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: + raise + finally: + try: + del tb + except NameError: + pass + + if limit[0] <= maxargs: + limit[0] += 1 + continue + raise + + # copy func name to wrapper for sensible debug output + func_name = "<parse action>" + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + wrapper.__name__ = func_name + + return wrapper + + +class ParserElement(object): + """Abstract base level parser element class.""" + DEFAULT_WHITE_CHARS = " \n\t\r" + verbose_stacktrace = False + + @staticmethod + def setDefaultWhitespaceChars(chars): + r""" + Overrides the default whitespace chars + + Example:: + + # default whitespace chars are space, <TAB> and newline + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.setDefaultWhitespaceChars(" \t") + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + @staticmethod + def inlineLiteralsUsing(cls): + """ + Set 
class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inlineLiteralsUsing(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + @classmethod + def _trim_traceback(cls, tb): + while tb.tb_next: + tb = tb.tb_next + return tb + + def __init__(self, savelist=False): + self.parseAction = list() + self.failAction = None + # ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + self.strRepr = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + self.mayReturnEmpty = False # used when checking for left-recursion + self.keepTabs = False + self.ignoreExprs = list() + self.debug = False + self.streamlined = False + self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index + self.errmsg = "" + self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) + self.debugActions = (None, None, None) # custom debug actions + self.re = None + self.callPreparse = True # used to avoid redundant calls to preParse + self.callDuringTry = False + + def copy(self): + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. 
+ + Example:: + + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + return cpy + + def setName(self, name): + """ + Define name for this expression, makes debugging and exception messages clearer. + + Example:: + + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) + Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.name = name + self.errmsg = "Expected " + self.name + if __diag__.enable_debug_on_named_expressions: + self.setDebug() + return self + + def setResultsName(self, name, listAllMatches=False): + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + NOTE: this returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.setResultsName("name")`` + - see :class:`__call__`. 
+ + Example:: + + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches = True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def setBreak(self, breakFlag=True): + """Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``breakFlag`` to True to enable, False to + disable. + """ + if breakFlag: + _parseMethod = self._parse + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + # this call to pdb.set_trace() is intentional, not a checkin error + pdb.set_trace() + return _parseMethod(instring, loc, doActions, callPreParse) + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def setParseAction(self, *fns, **kwargs): + """ + Define one or more actions to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` , + ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object + + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. + Otherwise, fn does not need to return any value. 
+ + If None is passed as the parse action, all previously added parse actions for this + expression are cleared. + + Optional keyword arguments: + - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`parseString for more + information on parsing strings containing ``<TAB>`` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + + Example:: + + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + # use parse action to convert to ints at parse time + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + date_str = integer + '/' + integer + '/' + integer + + # note that integer fields are now ints, not strings + date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] + """ + if list(fns) == [None,]: + self.parseAction = [] + else: + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction = list(map(_trim_arity, list(fns))) + self.callDuringTry = kwargs.get("callDuringTry", False) + return self + + def addParseAction(self, *fns, **kwargs): + """ + Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`. + + See examples in :class:`copy`. + """ + self.parseAction += list(map(_trim_arity, list(fns))) + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def addCondition(self, *fns, **kwargs): + """Add a boolean predicate function to expression's list of parse actions. See + :class:`setParseAction` for function call signatures. 
Unlike ``setParseAction``, + functions passed to ``addCondition`` need to return boolean success/fail of the condition. + + Optional keyword arguments: + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + + Example:: + + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) + """ + for fn in fns: + self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), + fatal=kwargs.get('fatal', False))) + + self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) + return self + + def setFailAction(self, fn): + """Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s, loc, expr, err)`` where: + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + The function returns no value. 
It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables(self, instring, loc): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc, dummy = e._parse(instring, loc) + exprsFound = True + except ParseException: + pass + return loc + + def preParse(self, instring, loc): + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + + if self.skipWhitespace: + wt = self.whiteChars + instrlen = len(instring) + while loc < instrlen and instring[loc] in wt: + loc += 1 + + return loc + + def parseImpl(self, instring, loc, doActions=True): + return loc, [] + + def postParse(self, instring, loc, tokenlist): + return tokenlist + + # ~ @profile + def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True): + TRY, MATCH, FAIL = 0, 1, 2 + debugging = (self.debug) # and doActions) + + if debugging or self.failAction: + # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring))) + if self.debugActions[TRY]: + self.debugActions[TRY](instring, loc, self) + try: + if callPreParse and self.callPreparse: + preloc = self.preParse(instring, loc) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= len(instring): + try: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except IndexError: + raise ParseException(instring, len(instring), self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except Exception as err: + # ~ print ("Exception raised:", err) + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokensStart, self, err) + if self.failAction: + self.failAction(instring, tokensStart, self, err) + raise + else: + if callPreParse and self.callPreparse: + preloc = self.preParse(instring, loc) + else: + preloc = loc + tokensStart = preloc + if self.mayIndexError or preloc >= 
len(instring): + try: + loc, tokens = self.parseImpl(instring, preloc, doActions) + except IndexError: + raise ParseException(instring, len(instring), self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, preloc, doActions) + + tokens = self.postParse(instring, loc, tokens) + + retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokensStart, retTokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + exc.__cause__ = parse_action_exc + raise exc + + if tokens is not None and tokens is not retTokens: + retTokens = ParseResults(tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults) + except Exception as err: + # ~ print "Exception raised in user parse action:", err + if self.debugActions[FAIL]: + self.debugActions[FAIL](instring, tokensStart, self, err) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokensStart, retTokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + exc.__cause__ = parse_action_exc + raise exc + + if tokens is not None and tokens is not retTokens: + retTokens = ParseResults(tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults) + if debugging: + # ~ print ("Matched", self, "->", retTokens.asList()) + if self.debugActions[MATCH]: + self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens) + + return loc, retTokens + + def tryParse(self, instring, loc): + try: + return self._parse(instring, loc, doActions=False)[0] + except ParseFatalException: + raise ParseException(instring, loc, self.errmsg, self) + + def canParseNext(self, instring, loc): + try: + 
self.tryParse(instring, loc) + except (ParseException, IndexError): + return False + else: + return True + + class _UnboundedCache(object): + def __init__(self): + cache = {} + self.not_in_cache = not_in_cache = object() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + if _OrderedDict is not None: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = _OrderedDict() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(cache) > size: + try: + cache.popitem(False) + except KeyError: + pass + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + else: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = {} + key_fifo = collections.deque([], size) + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(key_fifo) > size: + cache.pop(key_fifo.popleft(), None) + key_fifo.append(key) + + def clear(self): + cache.clear() + key_fifo.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = {} # this 
is set later by enabledPackrat(); this is here so that resetCache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache(self, instring, loc, doActions=True, callPreParse=True): + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy())) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if isinstance(value, Exception): + raise value + return value[0], value[1].copy() + + _parse = _parseNoCache + + @staticmethod + def resetCache(): + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) + + _packratEnabled = False + @staticmethod + def enablePackrat(cache_size_limit=128): + """Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + + - cache_size_limit - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. 
+ + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method :class:`ParserElement.enablePackrat`. + For best results, call ``enablePackrat()`` immediately after + importing pyparsing. + + Example:: + + import pyparsing + pyparsing.ParserElement.enablePackrat() + """ + if not ParserElement._packratEnabled: + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = ParserElement._UnboundedCache() + else: + ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) + ParserElement._parse = ParserElement._parseCache + + def parseString(self, instring, parseAll=False): + """ + Execute the parse expression with the given string. + This is the main interface to the client code, once the complete + expression has been built. + + Returns the parsed data as a :class:`ParseResults` object, which may be + accessed as a list, or as a dict or object with attributes if the given parser + includes results names. + + If you want the grammar to require that the entire input string be + successfully parsed, then set ``parseAll`` to True (equivalent to ending + the grammar with ``StringEnd()``). + + Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string, + in order to report proper column numbers in parse actions. 
+ If the input string contains tabs and + the grammar uses parse actions that use the ``loc`` argument to index into the + string being parsed, you can ensure you have a consistent view of the input + string by: + + - calling ``parseWithTabs`` on your grammar before calling ``parseString`` + (see :class:`parseWithTabs`) + - define your parse action using the full ``(s, loc, toks)`` signature, and + reference the input string using the parse action's ``s`` argument + - explictly expand the tabs in your input string before calling + ``parseString`` + + Example:: + + Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] + Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text + """ + ParserElement.resetCache() + if not self.streamlined: + self.streamline() + # ~ self.saveAsList = True + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parseAll: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd() + se._parse(instring, loc) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + if getattr(exc, '__traceback__', None) is not None: + exc.__traceback__ = self._trim_traceback(exc.__traceback__) + raise exc + else: + return tokens + + def scanString(self, instring, maxMatches=_MAX_INT, overlap=False): + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``maxMatches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parseString` for more information on parsing + strings with embedded tabs. 
+ + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scanString(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = _ustr(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn(instring, loc) + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + if getattr(exc, '__traceback__', None) is not None: + exc.__traceback__ = self._trim_traceback(exc.__traceback__) + raise exc + + def transformString(self, instring): + """ + Extension to :class:`scanString`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transformString``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transformString()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transformString()`` returns the resulting transformed string. 
+ + Example:: + + wd = Word(alphas) + wd.setParseAction(lambda toks: toks[0].title()) + + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out = [] + lastE = 0 + # force preservation of <TAB>s, to minimize unwanted transformation of string, and to + # keep string locs straight between transformString and scanString + self.keepTabs = True + try: + for t, s, e in self.scanString(instring): + out.append(instring[lastE:s]) + if t: + if isinstance(t, ParseResults): + out += t.asList() + elif isinstance(t, list): + out += t + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join(map(_ustr, _flatten(out))) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + if getattr(exc, '__traceback__', None) is not None: + exc.__traceback__ = self._trim_traceback(exc.__traceback__) + raise exc + + def searchString(self, instring, maxMatches=_MAX_INT): + """ + Another extension to :class:`scanString`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``maxMatches`` argument, to clip searching after 'n' matches are found. 
+ + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + try: + return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + if getattr(exc, '__traceback__', None) is not None: + exc.__traceback__ = self._trim_traceback(exc.__traceback__) + raise exc + + def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``includeSeparators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example:: + + punc = oneOf(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + splits = 0 + last = 0 + for t, s, e in self.scanString(instring, maxMatches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other): + """ + Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement + converts them to :class:`Literal`s by default. 
+ + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + + prints:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. + + Literal('start') + ... + Literal('end') + + is equivalent to: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. + """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return And([self, other]) + + def __radd__(self, other): + """ + Implementation of + operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other + self + + def __sub__(self, other): + """ + Implementation of - operator, returns :class:`And` with error stop + """ + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return self + And._ErrorStop() + other + + def __rsub__(self, other): + """ + Implementation of - operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, basestring): + other = 
self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other - self + + def __mul__(self, other): + """ + Implementation of * operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also me multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0, ) + other[1:] + (None,))[:2] + + if isinstance(other, int): + minElements, optElements = other, 0 + elif isinstance(other, tuple): + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1])) + else: + raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError("second tuple value must be greater or equal to first tuple value") + if minElements == optElements == 0: + raise ValueError("cannot multiply ParserElement by 0 or (0, 0)") + + if optElements: + def makeOptionalList(n): + if n > 1: + return Optional(self + makeOptionalList(n - 1)) + else: + return Optional(self) + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other): + return self.__mul__(other) + + def __or__(self, other): + """ + Implementation of | operator - returns :class:`MatchFirst` + """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if 
isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return MatchFirst([self, other]) + + def __ror__(self, other): + """ + Implementation of | operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other | self + + def __xor__(self, other): + """ + Implementation of ^ operator - returns :class:`Or` + """ + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return Or([self, other]) + + def __rxor__(self, other): + """ + Implementation of ^ operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other ^ self + + def __and__(self, other): + """ + Implementation of & operator - returns :class:`Each` + """ + if isinstance(other, basestring): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return Each([self, other]) + + def __rand__(self, other): + """ + Implementation of & operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, basestring): + other = 
self._literalStringClass(other) + if not isinstance(other, ParserElement): + warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return None + return other & self + + def __invert__(self): + """ + Implementation of ~ operator - returns :class:`NotAny` + """ + return NotAny(self) + + def __iter__(self): + # must implement __iter__ to override legacy use of sequential access to __getitem__ to + # iterate over a sequence + raise TypeError('%r object is not iterable' % self.__class__.__name__) + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception + if more than ``n`` ``expr``s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + """ + + # convert single arg keys to tuples + try: + if isinstance(key, str): + key = (key,) + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5], + '... [{0}]'.format(len(key)) + if len(key) > 5 else '')) + + # clip to 2 elements + ret = self * tuple(key[:2]) + return ret + + def __call__(self, name=None): + """ + Shortcut for :class:`setResultsName`, with ``listAllMatches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be + passed as ``True``. 
+ + If ``name` is omitted, same as calling :class:`copy`. + + Example:: + + # these are equivalent + userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") + """ + if name is not None: + return self._setResultsName(name) + else: + return self.copy() + + def suppress(self): + """ + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress(self) + + def leaveWhitespace(self): + """ + Disables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + self.skipWhitespace = False + return self + + def setWhitespaceChars(self, chars): + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = chars + self.copyDefaultWhiteChars = False + return self + + def parseWithTabs(self): + """ + Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string. + Must be called before ``parseString`` when the input grammar contains elements that + match ``<TAB>`` characters. + """ + self.keepTabs = True + return self + + def ignore(self, other): + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. 
+ + Example:: + + patt = OneOrMore(Word(alphas)) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] + + patt.ignore(cStyleComment) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, basestring): + other = Suppress(other) + + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append(Suppress(other.copy())) + return self + + def setDebugActions(self, startAction, successAction, exceptionAction): + """ + Enable display of debugging messages while doing pattern matching. + """ + self.debugActions = (startAction or _defaultStartDebugAction, + successAction or _defaultSuccessDebugAction, + exceptionAction or _defaultExceptionDebugAction) + self.debug = True + return self + + def setDebug(self, flag=True): + """ + Enable display of debugging messages while doing pattern matching. + Set ``flag`` to True to enable, False to disable. + + Example:: + + wd = Word(alphas).setName("alphaword") + integer = Word(nums).setName("numword") + term = wd | integer + + # turn on debugging for wd + wd.setDebug() + + OneOrMore(term).parseString("abc 123 xyz 890") + + prints:: + + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using :class:`setDebugActions`. Prior to attempting + to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"`` + is shown. 
Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` + message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``. + """ + if flag: + self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction) + else: + self.debug = False + return self + + def __str__(self): + return self.name + + def __repr__(self): + return _ustr(self) + + def streamline(self): + self.streamlined = True + self.strRepr = None + return self + + def checkRecursion(self, parseElementList): + pass + + def validate(self, validateTrace=None): + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ + self.checkRecursion([]) + + def parseFile(self, file_or_filename, parseAll=False): + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. 
+ """ + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r") as f: + file_contents = f.read() + try: + return self.parseString(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + if getattr(exc, '__traceback__', None) is not None: + exc.__traceback__ = self._trim_traceback(exc.__traceback__) + raise exc + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, basestring): + return self.matches(other) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return id(self) + + def __req__(self, other): + return self == other + + def __rne__(self, other): + return not (self == other) + + def matches(self, testString, parseAll=True): + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. + + Parameters: + - testString - to test against this expression for a match + - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests + + Example:: + + expr = Word(nums) + assert expr.matches("100") + """ + try: + self.parseString(_ustr(testString), parseAll=parseAll) + return True + except ParseBaseException: + return False + + def runTests(self, tests, parseAll=True, comment='#', + fullDump=True, printResults=True, failureTests=False, postParse=None, + file=None): + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
+ + Parameters: + - tests - a list of separate test strings, or a multiline string of test strings + - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests + - comment - (default= ``'#'``) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - printResults - (default= ``True``) prints test output to stdout + - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing + - postParse - (default= ``None``) optional callback for successful parse results; called as + `fn(test_string, parse_results)` and returns a string to be added to the test output + - file - (default=``None``) optional file-like object to which test output will be written; + if None, will default to ``sys.stdout`` + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if ``failureTests`` is True), and the results contain a list of lines of each + test's output + + Example:: + + number_expr = pyparsing_common.number.copy() + + result = number_expr.runTests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = number_expr.runTests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' 
+ 3.14.159 + ''', failureTests=True) + print("Success" if result[0] else "Failed!") + + prints:: + + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success + + Each test string must be on a single line. If you want to test a string that spans multiple + lines, create a test like this:: + + expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") + + (Note that this is a raw string literal, you must include the leading 'r'.) + """ + if isinstance(tests, basestring): + tests = list(map(str.strip, tests.rstrip().splitlines())) + if isinstance(comment, basestring): + comment = Literal(comment) + if file is None: + file = sys.stdout + print_ = file.write + + allResults = [] + comments = [] + success = True + NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString) + BOM = u'\ufeff' + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(t) + continue + if not t: + continue + out = ['\n' + '\n'.join(comments) if comments else '', t] + comments = [] + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = NL.transformString(t.lstrip(BOM)) + result = self.parseString(t, parseAll=parseAll) + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + if '\n' in t: + out.append(line(pe.loc, t)) + out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal) + else: + out.append(' ' * pe.loc + '^' + fatal) + 
out.append("FAIL: " + str(pe)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: " + str(exc)) + success = success and failureTests + result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e)) + else: + out.append(result.dump(full=fullDump)) + + if printResults: + if fullDump: + out.append('') + print_('\n'.join(out)) + + allResults.append((t, result)) + + return success, allResults + + +class _PendingSkip(ParserElement): + # internal placeholder class to hold a place were '...' is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr, must_skip=False): + super(_PendingSkip, self).__init__() + self.strRepr = str(expr + Empty()).replace('Empty', '...') + self.name = self.strRepr + self.anchor = expr + self.must_skip = must_skip + + def __add__(self, other): + skipper = SkipTo(other).setName("...")("_skipped*") + if self.must_skip: + def must_skip(t): + if not t._skipped or t._skipped.asList() == ['']: + del t[0] + t.pop("_skipped", None) + def show_skip(t): + if t._skipped.asList()[-1:] == ['']: + skipped = t.pop('_skipped') + t['_skipped'] = 'missing <' + repr(self.anchor) + '>' + return (self.anchor + skipper().addParseAction(must_skip) + | skipper().addParseAction(show_skip)) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.strRepr + + def parseImpl(self, *args): + raise Exception("use of `...` expression without following SkipTo target expression") + + +class Token(ParserElement): + """Abstract 
:class:`ParserElement` subclass, for defining atomic + matching patterns. + """ + def __init__(self): + super(Token, self).__init__(savelist=False) + + +class Empty(Token): + """An empty token, will always match. + """ + def __init__(self): + super(Empty, self).__init__() + self.name = "Empty" + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """A token that will never match. + """ + def __init__(self): + super(NoMatch, self).__init__() + self.name = "NoMatch" + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl(self, instring, loc, doActions=True): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """Token to exactly match a specified string. + + Example:: + + Literal('blah').parseString('blah') # -> ['blah'] + Literal('blah').parseString('blahfooblah') # -> ['blah'] + Literal('blah').parseString('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use :class:`CaselessLiteral`. + + For keyword matching (force word break before and after the matched string), + use :class:`Keyword` or :class:`CaselessKeyword`. 
+ """ + def __init__(self, matchString): + super(Literal, self).__init__() + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Literal; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.__class__ = Empty + self.name = '"%s"' % _ustr(self.match) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: modify __class__ to select + # a parseImpl optimized for single-character check + if self.matchLen == 1 and type(self) is Literal: + self.__class__ = _SingleCharLiteral + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match + raise ParseException(instring, loc, self.errmsg, self) + +_L = Literal +ParserElement._literalStringClass = Literal + +class Keyword(Token): + """Token to exactly match a specified string as a keyword, that is, + it must be immediately followed by a non-keyword character. Compare + with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``identChars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. 
+ + Example:: + + Keyword("start").parseString("start") # -> ['start'] + Keyword("start").parseString("starting") # -> Exception + + For case-insensitive matching, use :class:`CaselessKeyword`. + """ + DEFAULT_KEYWORD_CHARS = alphanums + "_$" + + def __init__(self, matchString, identChars=None, caseless=False): + super(Keyword, self).__init__() + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Keyword; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.name = '"%s"' % self.match + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = matchString.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def parseImpl(self, instring, loc, doActions=True): + if self.caseless: + if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch) + and (loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars) + and (loc == 0 + or instring[loc - 1].upper() not in self.identChars)): + return loc + self.matchLen, self.match + + else: + if instring[loc] == self.firstMatchChar: + if ((self.matchLen == 1 or instring.startswith(self.match, loc)) + and (loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars) + and (loc == 0 or instring[loc - 1] not in self.identChars)): + return loc + self.matchLen, self.match + + raise ParseException(instring, loc, self.errmsg, self) + + def copy(self): + c = super(Keyword, self).copy() + c.identChars = Keyword.DEFAULT_KEYWORD_CHARS + return c + + @staticmethod + def setDefaultKeywordChars(chars): + """Overrides the default Keyword chars + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + +class CaselessLiteral(Literal): + """Token to 
match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for :class:`CaselessKeyword`.) + """ + def __init__(self, matchString): + super(CaselessLiteral, self).__init__(matchString.upper()) + # Preserve the defining literal. + self.returnString = matchString + self.name = "'%s'" % self.returnString + self.errmsg = "Expected " + self.name + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc:loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example:: + + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] + + (Contrast with example for :class:`CaselessLiteral`.) + """ + def __init__(self, matchString, identChars=None): + super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True) + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``maxMismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. 
+ + Example:: + + patt = CloseMatch("ATCATCGAATGGA") + patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) + patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + def __init__(self, match_string, maxMismatches=1): + super(CloseMatch, self).__init__() + self.name = match_string + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) + self.mayIndexError = False + self.mayReturnEmpty = False + + def parseImpl(self, instring, loc, doActions=True): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)): + src, mat = s_m + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results['original'] = match_string + results['mismatches'] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. 
+ Defined with string containing all allowed initial characters, an + optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. An optional ``excludeChars`` parameter can + list characters that might be found in the input ``bodyChars`` + string; useful to define a word of all printables except for one or + two characters, for instance. + + :class:`srange` is useful for defining custom character set strings + for defining ``Word`` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) 
+ - :class:`printables` (any non-whitespace character) + + Example:: + + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums + '-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, excludeChars=",") + """ + def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None): + super(Word, self).__init__() + if excludeChars: + excludeChars = set(excludeChars) + initChars = ''.join(c for c in initChars if c not in excludeChars) + if bodyChars: + bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) + self.initCharsOrig = initChars + self.initChars = set(initChars) + if bodyChars: + self.bodyCharsOrig = bodyChars + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = initChars + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0): + if self.bodyCharsOrig == self.initCharsOrig: + self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) + elif len(self.initCharsOrig) == 1: + self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig), + 
_escapeRegexRangeChars(self.bodyCharsOrig),) + else: + self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig), + _escapeRegexRangeChars(self.bodyCharsOrig),) + if self.asKeyword: + self.reString = r"\b" + self.reString + r"\b" + + try: + self.re = re.compile(self.reString) + except Exception: + self.re = None + else: + self.re_match = self.re.match + self.__class__ = _WordRegex + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.initChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min(maxloc, instrlen) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + elif self.asKeyword: + if (start > 0 and instring[start - 1] in bodychars + or loc < instrlen and instring[loc] in bodychars): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def __str__(self): + try: + return super(Word, self).__str__() + except Exception: + pass + + if self.strRepr is None: + + def charsAsStr(s): + if len(s) > 4: + return s[:4] + "..." 
+ else: + return s + + if self.initCharsOrig != self.bodyCharsOrig: + self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig)) + else: + self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) + + return self.strRepr + +class _WordRegex(Word): + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + +class Char(_WordRegex): + """A short-cut class for defining ``Word(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + def __init__(self, charset, asKeyword=False, excludeChars=None): + super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars) + self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars)) + if asKeyword: + self.reString = r"\b%s\b" % self.reString + self.re = re.compile(self.reString) + self.re_match = self.re.match + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_. + If the given regex contains named groups (defined using ``(?P<name>...)``), + these will be preserved as named parse results. 
+ + If instead of the Python stdlib re module you wish to use a different RE module + (such as the `regex` module), you can replace it by either building your + Regex object with a compiled RE that was compiled using regex: + + Example:: + + realnum = Regex(r"[+-]?\d+\.\d*") + date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + + # use regex module instead of stdlib re module to construct a Regex using + # a compiled regular expression + import regex + parser = pp.Regex(regex.compile(r'[0-9]')) + + """ + def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False): + """The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module <https://docs.python.org/3/library/re.html>`_ module for an + explanation of the acceptable patterns and flags. 
+ """ + super(Regex, self).__init__() + + if isinstance(pattern, basestring): + if not pattern: + warnings.warn("null string passed to Regex; use Empty() instead", + SyntaxWarning, stacklevel=2) + + self.pattern = pattern + self.flags = flags + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + except sre_constants.error: + warnings.warn("invalid pattern (%s) passed to Regex" % pattern, + SyntaxWarning, stacklevel=2) + raise + + elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'): + self.re = pattern + self.pattern = self.reString = pattern.pattern + self.flags = flags + + else: + raise TypeError("Regex may only be constructed with a string or a compiled RE object") + + self.re_match = self.re.match + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = self.re_match("") is not None + self.asGroupList = asGroupList + self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList + if self.asMatch: + self.parseImpl = self.parseImplAsMatch + + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = ParseResults(result.group()) + d = result.groupdict() + if d: + for k, v in d.items(): + ret[k] = v + return loc, ret + + def parseImplAsGroupList(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def __str__(self): + try: + return super(Regex, self).__str__() + except Exception: + pass + + if 
self.strRepr is None: + self.strRepr = "Re:(%s)" % repr(self.pattern) + + return self.strRepr + + def sub(self, repl): + r""" + Return Regex with an attached parse action to transform the parsed + result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. + + Example:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") + print(make_html.transformString("h1:main title:")) + # prints "<h1>main title</h1>" + """ + if self.asGroupList: + warnings.warn("cannot use sub() with Regex(asGroupList=True)", + SyntaxWarning, stacklevel=2) + raise SyntaxError() + + if self.asMatch and callable(repl): + warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", + SyntaxWarning, stacklevel=2) + raise SyntaxError() + + if self.asMatch: + def pa(tokens): + return tokens[0].expand(repl) + else: + def pa(tokens): + return self.re.sub(repl, tokens[0]) + return self.addParseAction(pa) + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - quoteChar - string of one or more characters defining the + quote delimiting string + - escChar - character to escape quotes, typically backslash + (default= ``None``) + - escQuote - special quote sequence to escape an embedded quote + string (such as SQL's ``""`` to escape an embedded ``"``) + (default= ``None``) + - multiline - boolean indicating whether quotes can span + multiple lines (default= ``False``) + - unquoteResults - boolean indicating whether the matched text + should be unquoted (default= ``True``) + - endQuoteChar - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quoteChar) + - convertWhitespaceEscapes - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) 
to actual whitespace + (default= ``True``) + + Example:: + + qs = QuotedString('"') + print(qs.searchString('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', endQuoteChar='}}') + print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', escQuote='""') + print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + + prints:: + + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] + """ + def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, + unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): + super(QuotedString, self).__init__() + + # remove white space from quote chars - wont work anyway + quoteChar = quoteChar.strip() + if not quoteChar: + warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) + raise SyntaxError() + + if endQuoteChar is None: + endQuoteChar = quoteChar + else: + endQuoteChar = endQuoteChar.strip() + if not endQuoteChar: + warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2) + raise SyntaxError() + + self.quoteChar = quoteChar + self.quoteCharLen = len(quoteChar) + self.firstQuoteChar = quoteChar[0] + self.endQuoteChar = endQuoteChar + self.endQuoteCharLen = len(endQuoteChar) + self.escChar = escChar + self.escQuote = escQuote + self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes + + if multiline: + self.flags = re.MULTILINE | re.DOTALL + self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '')) + else: + self.flags = 0 + self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar), + _escapeRegexRangeChars(self.endQuoteChar[0]), + (escChar is not None and _escapeRegexRangeChars(escChar) or '')) + if len(self.endQuoteChar) > 1: + self.pattern += 
( + '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), + _escapeRegexRangeChars(self.endQuoteChar[i])) + for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')') + + if escQuote: + self.pattern += (r'|(?:%s)' % re.escape(escQuote)) + if escChar: + self.pattern += (r'|(?:%s.)' % re.escape(escChar)) + self.escCharReplacePattern = re.escape(self.escChar) + "(.)" + self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + self.re_match = self.re.match + except sre_constants.error: + warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, + SyntaxWarning, stacklevel=2) + raise + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen: -self.endQuoteCharLen] + + if isinstance(ret, basestring): + # replace escaped whitespace + if '\\' in ret and self.convertWhitespaceEscapes: + ws_map = { + r'\t': '\t', + r'\n': '\n', + r'\f': '\f', + r'\r': '\r', + } + for wslit, wschar in ws_map.items(): + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + def __str__(self): + try: + return super(QuotedString, self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) + + return self.strRepr + + +class CharsNotIn(Token): + """Token 
for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. + + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + def __init__(self, notChars, min=1, max=0, exact=0): + super(CharsNotIn, self).__init__() + self.skipWhitespace = False + self.notChars = notChars + + if min < 1: + raise ValueError("cannot specify a minimum length < 1; use " + "Optional(CharsNotIn()) if zero-length char group is permitted") + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = (self.minLen == 0) + self.mayIndexError = False + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] in self.notChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + notchars = self.notChars + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + def __str__(self): + try: + return super(CharsNotIn, self).__str__() + except Exception: + pass + + if self.strRepr is None: + if len(self.notChars) > 4: + self.strRepr = "!W:(%s...)" % self.notChars[:4] + else: + self.strRepr = 
"!W:(%s)" % self.notChars + + return self.strRepr + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. + """ + whiteStrs = { + ' ' : '<SP>', + '\t': '<TAB>', + '\n': '<LF>', + '\r': '<CR>', + '\f': '<FF>', + u'\u00A0': '<NBSP>', + u'\u1680': '<OGHAM_SPACE_MARK>', + u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>', + u'\u2000': '<EN_QUAD>', + u'\u2001': '<EM_QUAD>', + u'\u2002': '<EN_SPACE>', + u'\u2003': '<EM_SPACE>', + u'\u2004': '<THREE-PER-EM_SPACE>', + u'\u2005': '<FOUR-PER-EM_SPACE>', + u'\u2006': '<SIX-PER-EM_SPACE>', + u'\u2007': '<FIGURE_SPACE>', + u'\u2008': '<PUNCTUATION_SPACE>', + u'\u2009': '<THIN_SPACE>', + u'\u200A': '<HAIR_SPACE>', + u'\u200B': '<ZERO_WIDTH_SPACE>', + u'\u202F': '<NNBSP>', + u'\u205F': '<MMSP>', + u'\u3000': '<IDEOGRAPHIC_SPACE>', + } + def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): + super(White, self).__init__() + self.matchWhite = ws + self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite)) + # ~ self.leaveWhitespace() + self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) + self.mayReturnEmpty = True + self.errmsg = "Expected " + self.name + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.matchWhite: + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min(maxloc, len(instring)) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start 
< self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class _PositionToken(Token): + def __init__(self): + super(_PositionToken, self).__init__() + self.name = self.__class__.__name__ + self.mayReturnEmpty = True + self.mayIndexError = False + +class GoToColumn(_PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. + """ + def __init__(self, colno): + super(GoToColumn, self).__init__() + self.col = colno + + def preParse(self, instring, loc): + if col(loc, instring) != self.col: + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col: + loc += 1 + return loc + + def parseImpl(self, instring, loc, doActions=True): + thiscol = col(loc, instring) + if thiscol > self.col: + raise ParseException(instring, loc, "Text not in expected column", self) + newloc = loc + self.col - thiscol + ret = instring[loc: newloc] + return newloc, ret + + +class LineStart(_PositionToken): + r"""Matches if current position is at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).searchString(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + def __init__(self): + super(LineStart, self).__init__() + self.errmsg = "Expected start of line" + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + +class LineEnd(_PositionToken): + """Matches if current position is at the end of a line within the + parse string + """ + def __init__(self): + super(LineEnd, self).__init__() + 
self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", "")) + self.errmsg = "Expected end of line" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class StringStart(_PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + def __init__(self): + super(StringStart, self).__init__() + self.errmsg = "Expected start of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class StringEnd(_PositionToken): + """Matches if current position is at the end of the parse string + """ + def __init__(self): + super(StringEnd, self).__init__() + self.errmsg = "Expected end of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + +class WordStart(_PositionToken): + """Matches if the current position is at the beginning of a Word, + and is not preceded by any character in a given set of + ``wordChars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. 
+ """ + def __init__(self, wordChars=printables): + super(WordStart, self).__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + if (instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + +class WordEnd(_PositionToken): + """Matches if the current position is at the end of a Word, and is + not followed by any character in a given set of ``wordChars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + def __init__(self, wordChars=printables): + super(WordEnd, self).__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True): + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if (instring[loc] in self.wordChars or + instring[loc - 1] not in self.wordChars): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. 
+ """ + def __init__(self, exprs, savelist=False): + super(ParseExpression, self).__init__(savelist) + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, basestring): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, basestring) for expr in exprs): + exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def append(self, other): + self.exprs.append(other) + self.strRepr = None + return self + + def leaveWhitespace(self): + """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on + all contained expressions.""" + self.skipWhitespace = False + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leaveWhitespace() + return self + + def ignore(self, other): + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super(ParseExpression, self).ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super(ParseExpression, self).ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def __str__(self): + try: + return super(ParseExpression, self).__str__() + except Exception: + pass + + if self.strRepr is None: + self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs)) + return self.strRepr + + def streamline(self): + super(ParseExpression, self).streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d) + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for Or's and MatchFirst's) + if len(self.exprs) == 2: 
+ other = self.exprs[0] + if (isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug): + self.exprs = other.exprs[:] + [self.exprs[1]] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if (isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug): + self.exprs = self.exprs[:-1] + other.exprs[:] + self.strRepr = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + _ustr(self) + + return self + + def validate(self, validateTrace=None): + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + self.checkRecursion([]) + + def copy(self): + ret = super(ParseExpression, self).copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_ungrouped_named_tokens_in_collection: + for e in self.exprs: + if isinstance(e, ParserElement) and e.resultsName: + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName), + stacklevel=3) + + return super(ParseExpression, self)._setResultsName(name, listAllMatches) + + +class And(ParseExpression): + """ + Requires all given :class:`ParseExpression` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. 
+ + Example:: + + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super(And._ErrorStop, self).__init__(*args, **kwargs) + self.name = '-' + self.leaveWhitespace() + + def __init__(self, exprs, savelist=True): + exprs = list(exprs) + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is Ellipsis: + if i < len(exprs) - 1: + skipto_arg = (Empty() + exprs[i + 1]).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + else: + raise Exception("cannot construct And with sequence ending in ...") + else: + tmp.append(expr) + exprs[:] = tmp + super(And, self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.setWhitespaceChars(self.exprs[0].whiteChars) + self.skipWhitespace = self.exprs[0].skipWhitespace + self.callPreparse = True + + def streamline(self): + # collapse any _PendingSkip's + if self.exprs: + if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1]): + for i, e in enumerate(self.exprs[:-1]): + if e is None: + continue + if (isinstance(e, ParseExpression) + and e.exprs and isinstance(e.exprs[-1], _PendingSkip)): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = None + self.exprs = [e for e in self.exprs if e is not None] + + super(And, self).streamline() + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + # pass False as last arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False) + errorStop = False + for e in self.exprs[1:]: + if 
isinstance(e, And._ErrorStop): + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, doActions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException(instring, len(instring), self.errmsg, self) + else: + loc, exprtokens = e._parse(instring, loc, doActions) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # And([self, other]) + + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e.checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + +class Or(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.searchString("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + def __init__(self, exprs, savelist=False): + super(Or, self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self): + super(Or, self).streamline() + if __compat__.collect_all_And_tokens: + self.saveAsList = any(e.saveAsList for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + matches = [] + for e in self.exprs: + try: + loc2 = e.tryParse(instring, loc) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring, len(instring), e.errmsg, self) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. 
+ matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, doActions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + + def __ixor__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # Or([self, other]) + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e.checkRecursion(subRecCheckList) + + def _setResultsName(self, name, listAllMatches=False): + if (not __compat__.collect_all_And_tokens + and __diag__.warn_multiple_tokens_in_named_alternation): + if any(isinstance(e, And) for e in self.exprs): + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "may only return a single token for an And alternative, " + "in future will return the full list of tokens".format( + "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), + stacklevel=3) + + return 
super(Or, self)._setResultsName(name, listAllMatches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) + print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + def __init__(self, exprs, savelist=False): + super(MatchFirst, self).__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self): + super(MatchFirst, self).streamline() + if __compat__.collect_all_And_tokens: + self.saveAsList = any(e.saveAsList for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + for e in self.exprs: + try: + ret = e._parse(instring, loc, doActions) + return ret + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException(instring, len(instring), e.errmsg, self) + maxExcLoc = len(instring) + + # only got here if no expression matched, raise exception for match that made it the furthest + else: + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException(instring, loc, "no defined alternatives to match", self) + + def __ior__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + return self.append(other) # 
MatchFirst([self, other]) + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e.checkRecursion(subRecCheckList) + + def _setResultsName(self, name, listAllMatches=False): + if (not __compat__.collect_all_And_tokens + and __diag__.warn_multiple_tokens_in_named_alternation): + if any(isinstance(e, And) for e in self.exprs): + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "may only return a single token for an And alternative, " + "in future will return the full list of tokens".format( + "warn_multiple_tokens_in_named_alternation", name, type(self).__name__), + stacklevel=3) + + return super(MatchFirst, self)._setResultsName(name, listAllMatches) + + +class Each(ParseExpression): + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. 
+ + Example:: + + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) + + shape_spec.runTests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + def __init__(self, exprs, savelist=True): + super(Each, self).__init__(exprs, savelist) + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def streamline(self): + super(Each, self).streamline() + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + if self.initExprGroups: + self.opt1map = dict((id(e.expr), e) for e in self.exprs if 
isinstance(e, Optional)) + opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)] + opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))] + self.optionals = opt1 + opt2 + self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)] + self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)] + self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))] + self.required += self.multirequired + self.initExprGroups = False + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + matchOrder = [] + + keepMatching = True + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired + failed = [] + for e in tmpExprs: + try: + tmpLoc = e.tryParse(instring, tmpLoc) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + if tmpReqd: + missing = ", ".join(_ustr(e) for e in tmpReqd) + raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing) + + # add any unmatched Optionals, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt] + + resultlist = [] + for e in matchOrder: + loc, results = e._parse(instring, loc, doActions) + resultlist.append(results) + + finalResults = sum(resultlist, ParseResults([])) + return loc, finalResults + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}" + + return self.strRepr + + def checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e.checkRecursion(subRecCheckList) + + +class ParseElementEnhance(ParserElement): + 
"""Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + def __init__(self, expr, savelist=False): + super(ParseElementEnhance, self).__init__(savelist) + if isinstance(expr, basestring): + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr) + else: + expr = self._literalStringClass(Literal(expr)) + self.expr = expr + self.strRepr = None + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.setWhitespaceChars(expr.whiteChars) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def parseImpl(self, instring, loc, doActions=True): + if self.expr is not None: + return self.expr._parse(instring, loc, doActions, callPreParse=False) + else: + raise ParseException("", loc, self.errmsg, self) + + def leaveWhitespace(self): + self.skipWhitespace = False + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leaveWhitespace() + return self + + def ignore(self, other): + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super(ParseElementEnhance, self).ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + else: + super(ParseElementEnhance, self).ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + return self + + def streamline(self): + super(ParseElementEnhance, self).streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def checkRecursion(self, parseElementList): + if self in parseElementList: + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr.checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + 
[self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion([]) + + def __str__(self): + try: + return super(ParseElementEnhance, self).__str__() + except Exception: + pass + + if self.strRepr is None and self.expr is not None: + self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr)) + return self.strRepr + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + def __init__(self, expr): + super(FollowedBy, self).__init__(expr) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. 
``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - expr - expression that must match prior to the current parse + location + - retreat - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, Literal, Keyword, or + a Word or CharsNotIn with a specified exact or maximum length, then + the retreat parameter is not required. Otherwise, retreat must be + specified to give a maximum number of characters to look back from + the current parse position for a lookbehind match. + + Example:: + + # VB-style variable names with type prefixes + int_var = PrecededBy("#") + pyparsing_common.identifier + str_var = PrecededBy("$") + pyparsing_common.identifier + + """ + def __init__(self, expr, retreat=None): + super(PrecededBy, self).__init__(expr) + self.expr = self.expr().leaveWhitespace() + self.mayReturnEmpty = True + self.mayIndexError = False + self.exact = False + if isinstance(expr, str): + retreat = len(expr) + self.exact = True + elif isinstance(expr, (Literal, Keyword)): + retreat = expr.matchLen + self.exact = True + elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: + retreat = expr.maxLen + self.exact = True + elif isinstance(expr, _PositionToken): + retreat = 0 + self.exact = True + self.retreat = retreat + self.errmsg = "not preceded by " + str(expr) + self.skipWhitespace = False + self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) + + def parseImpl(self, instring, loc=0, doActions=True): + if self.exact: + if loc < self.retreat: + raise ParseException(instring, loc, self.errmsg) + start = loc - self.retreat + _, ret = self.expr._parse(instring, start) + else: + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[max(0, loc - self.retreat):loc] + last_expr = 
ParseException(instring, loc, self.errmsg) + for offset in range(1, min(loc, self.retreat + 1)+1): + try: + # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) + _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset) + except ParseBaseException as pbe: + last_expr = pbe + else: + break + else: + raise last_expr + return loc, ret + + +class NotAny(ParseElementEnhance): + """Lookahead to disallow matching with the given parse expression. + ``NotAny`` does *not* advance the parsing position within the + input string, it only verifies that the specified parse expression + does *not* match at the current position. Also, ``NotAny`` does + *not* skip over leading whitespace. ``NotAny`` always returns + a null token list. May be constructed using the '~' operator. + + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Optional(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infixNotation + boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term) + + # integers that are followed by "." 
are actually floats + integer = Word(nums) + ~Char(".") + """ + def __init__(self, expr): + super(NotAny, self).__init__(expr) + # ~ self.leaveWhitespace() + self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs + self.mayReturnEmpty = True + self.errmsg = "Found unwanted token, " + _ustr(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + if self.expr.canParseNext(instring, loc): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "~{" + _ustr(self.expr) + "}" + + return self.strRepr + +class _MultipleMatch(ParseElementEnhance): + def __init__(self, expr, stopOn=None): + super(_MultipleMatch, self).__init__(expr) + self.saveAsList = True + ender = stopOn + if isinstance(ender, basestring): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender): + if isinstance(ender, basestring): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + def parseImpl(self, instring, loc, doActions=True): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False) + try: + hasIgnoreExprs = (not not self.ignoreExprs) + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, doActions) + if tmptokens or tmptokens.haskeys(): + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + 
+ def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_ungrouped_named_tokens_in_collection: + for e in [self.expr] + getattr(self.expr, 'exprs', []): + if isinstance(e, ParserElement) and e.resultsName: + warnings.warn("{0}: setting results name {1!r} on {2} expression " + "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName), + stacklevel=3) + + return super(_MultipleMatch, self)._setResultsName(name, listAllMatches) + + +class OneOrMore(_MultipleMatch): + """Repetition of one or more of the given expression. + + Parameters: + - expr - expression that must match one or more times + - stopOn - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stopOn attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parseString(text).pprint() + """ + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "{" + _ustr(self.expr) + "}..." + + return self.strRepr + +class ZeroOrMore(_MultipleMatch): + """Optional repetition of zero or more of the given expression. 
+ + Parameters: + - expr - expression that must match zero or more times + - stopOn - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example: similar to :class:`OneOrMore` + """ + def __init__(self, expr, stopOn=None): + super(ZeroOrMore, self).__init__(expr, stopOn=stopOn) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + try: + return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) + except (ParseException, IndexError): + return loc, [] + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]..." + + return self.strRepr + + +class _NullToken(object): + def __bool__(self): + return False + __nonzero__ = __bool__ + def __str__(self): + return "" + +class Optional(ParseElementEnhance): + """Optional matching of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - default (optional) - value to be returned if the optional expression is not found. 
+ + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) + zip.runTests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + __optionalNotMatched = _NullToken() + + def __init__(self, expr, default=__optionalNotMatched): + super(Optional, self).__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + try: + loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + if self.defaultValue is not self.__optionalNotMatched: + if self.expr.resultsName: + tokens = ParseResults([self.defaultValue]) + tokens[self.expr.resultsName] = self.defaultValue + else: + tokens = [self.defaultValue] + else: + tokens = [] + return loc, tokens + + def __str__(self): + if hasattr(self, "name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]" + + return self.strRepr + +class SkipTo(ParseElementEnhance): + """Token for skipping over all undefined text until the matched + expression is found. + + Parameters: + - expr - target expression marking the end of the data to be skipped + - include - (default= ``False``) if True, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element list). 
+ - ignore - (default= ``None``) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - failOn - (default= ``None``) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the SkipTo is not a match + + Example:: + + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quotedString) + string_data.setParseAction(tokenMap(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.searchString(report): + print tkt.dump() + + prints:: + + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor + """ + def __init__(self, other, include=False, ignore=None, failOn=None): + super(SkipTo, self).__init__(other) + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + 
self.includeMatch = include + self.saveAsList = False + if isinstance(failOn, basestring): + self.failOn = self._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = "No match found for " + _ustr(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + startloc = loc + instrlen = len(instring) + expr = self.expr + expr_parse = self.expr._parse + self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None + self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: + try: + tmploc = self_ignoreExpr_tryParse(instring, tmploc) + except ParseBaseException: + break + + try: + expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = expr_parse(instring, loc, doActions, callPreParse=False) + skipresult += mat + + return loc, skipresult + +class Forward(ParseElementEnhance): + """Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the '<<' operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. 
+ + Specifically, '|' has a lower precedence than '<<', so that:: + + fwdExpr << a | b | c + + will actually be evaluated as:: + + (fwdExpr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwdExpr << (a | b | c) + + Converting to use the '<<=' operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. + """ + def __init__(self, other=None): + super(Forward, self).__init__(other, savelist=False) + + def __lshift__(self, other): + if isinstance(other, basestring): + other = self._literalStringClass(other) + self.expr = other + self.strRepr = None + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.setWhitespaceChars(self.expr.whiteChars) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + return self + + def __ilshift__(self, other): + return self << other + + def leaveWhitespace(self): + self.skipWhitespace = False + return self + + def streamline(self): + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None): + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self.checkRecursion([]) + + def __str__(self): + if hasattr(self, "name"): + return self.name + if self.strRepr is not None: + return self.strRepr + + # Avoid infinite recursion by setting a temporary strRepr + self.strRepr = ": ..." + + # Use the string representation of main expression. + retString = '...' 
+ try: + if self.expr is not None: + retString = _ustr(self.expr)[:1000] + else: + retString = "None" + finally: + self.strRepr = self.__class__.__name__ + ": " + retString + return self.strRepr + + def copy(self): + if self.expr is not None: + return super(Forward, self).copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, listAllMatches=False): + if __diag__.warn_name_set_on_empty_Forward: + if self.expr is None: + warnings.warn("{0}: setting results name {0!r} on {1} expression " + "that has no contained expression".format("warn_name_set_on_empty_Forward", + name, + type(self).__name__), + stacklevel=3) + + return super(Forward, self)._setResultsName(name, listAllMatches) + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseExpression`, for converting parsed results. + """ + def __init__(self, expr, savelist=False): + super(TokenConverter, self).__init__(expr) # , savelist) + self.saveAsList = False + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example:: + + real = Word(nums) + '.' + Word(nums) + print(real.parseString('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parseString('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) 
+ """ + def __init__(self, expr, joinString="", adjacent=True): + super(Combine, self).__init__(expr) + # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself + if adjacent: + self.leaveWhitespace() + self.adjacent = adjacent + self.skipWhitespace = True + self.joinString = joinString + self.callPreparse = True + + def ignore(self, other): + if self.adjacent: + ParserElement.ignore(self, other) + else: + super(Combine, self).ignore(other) + return self + + def postParse(self, instring, loc, tokenlist): + retToks = tokenlist.copy() + del retToks[:] + retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults) + + if self.resultsName and retToks.haskeys(): + return [retToks] + else: + return retToks + +class Group(TokenConverter): + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. + + Example:: + + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Optional(delimitedList(term)) + print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Optional(delimitedList(term))) + print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']] + """ + def __init__(self, expr): + super(Group, self).__init__(expr) + self.saveAsList = True + + def postParse(self, instring, loc, tokenlist): + return [tokenlist] + +class Dict(TokenConverter): + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. 
+ + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + # print attributes as plain groups + print(OneOrMore(attr_expr).parseString(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names + result = Dict(OneOrMore(Group(attr_expr))).parseString(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.asDict()) + + prints:: + + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + + See more examples at :class:`ParseResults` of accessing fields by results name. 
+ """ + def __init__(self, expr): + super(Dict, self).__init__(expr) + self.saveAsList = True + + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): + if len(tok) == 0: + continue + ikey = tok[0] + if isinstance(ikey, int): + ikey = _ustr(tok[0]).strip() + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) + else: + dictvalue = tok.copy() # ParseResults(i) + del dictvalue[0] + if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) + + if self.resultsName: + return [tokenlist] + else: + return tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. + + Example:: + + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + ZeroOrMore(',' + wd) + print(wd_list1.parseString(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) + print(wd_list2.parseString(source)) + + prints:: + + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + + (See also :class:`delimitedList`.) + """ + def postParse(self, instring, loc, tokenlist): + return [] + + def suppress(self): + return self + + +class OnlyOnce(object): + """Wrapper for parse actions, to ensure they are only called once. + """ + def __init__(self, methodCall): + self.callable = _trim_arity(methodCall) + self.called = False + def __call__(self, s, l, t): + if not self.called: + results = self.callable(s, l, t) + self.called = True + return results + raise ParseException(s, l, "") + def reset(self): + self.called = False + +def traceParseAction(f): + """Decorator for debugging parse actions. 
+ + When the parse action is called, this decorator will print + ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. + When the parse action completes, the decorator will print + ``"<<"`` followed by the returned value, or any exception that the parse action raised. + + Example:: + + wd = Word(alphas) + + @traceParseAction + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) + print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + + prints:: + + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + <<leaving remove_duplicate_chars (ret: 'dfjkls') + ['dfjkls'] + """ + f = _trim_arity(f) + def z(*paArgs): + thisFunc = f.__name__ + s, l, t = paArgs[-3:] + if len(paArgs) > 3: + thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc + sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)) + try: + ret = f(*paArgs) + except Exception as exc: + sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc)) + raise + sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret)) + return ret + try: + z.__name__ = f.__name__ + except AttributeError: + pass + return z + +# +# global helpers +# +def delimitedList(expr, delim=",", combine=False): + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. 
+ + Example:: + + delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..." + if combine: + return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName) + else: + return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName) + +def countedArray(expr, intExpr=None): + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``intExpr`` is specified, it should be a pyparsing expression + that produces an integer value. + + Example:: + + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) + countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] + """ + arrayExpr = Forward() + def countFieldParseAction(s, l, t): + n = t[0] + arrayExpr << (n and Group(And([expr] * n)) or Group(empty)) + return [] + if intExpr is None: + intExpr = Word(nums).setParseAction(lambda t: int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.setName("arrayLen") + intExpr.addParseAction(countFieldParseAction, callDuringTry=True) + return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...') + +def _flatten(L): + ret = [] + for i in L: + if isinstance(i, list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret + +def matchPreviousLiteral(expr): + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, 
it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = matchPreviousLiteral(first) + matchExpr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. If this is not desired, use + :class:`matchPreviousExpr`. Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + def copyTokenToRepeater(s, l, t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.asList()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def matchPreviousExpr(expr): + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = matchPreviousExpr(first) + matchExpr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. 
+ """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + def copyTokenToRepeater(s, l, t): + matchTokens = _flatten(t.asList()) + def mustMatchTheseTokens(s, l, t): + theseTokens = _flatten(t.asList()) + if theseTokens != matchTokens: + raise ParseException('', 0, '') + rep.setParseAction(mustMatchTheseTokens, callDuringTry=True) + expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) + return rep + +def _escapeRegexRangeChars(s): + # ~ escape these chars: ^-[] + for c in r"\^-[]": + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") + return _ustr(s) + +def oneOf(strs, caseless=False, useRegex=True, asKeyword=False): + """Helper to quickly define a set of alternative Literals, and makes + sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. + + Parameters: + + - strs - a string of space-delimited literals, or a collection of + string literals + - caseless - (default= ``False``) - treat all literals as + caseless + - useRegex - (default= ``True``) - as an optimization, will + generate a Regex object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if + creating a :class:`Regex` raises an exception) + - asKeyword - (default=``False``) - enforce Keyword-style matching on the + generated expressions + + Example:: + + comp_oper = oneOf("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + if isinstance(caseless, basestring): + warnings.warn("More than one string argument passed to oneOf, pass " + "choices as a list or space-delimited string", stacklevel=2) + + if caseless: + isequal = (lambda a, b: 
a.upper() == b.upper()) + masks = (lambda a, b: b.upper().startswith(a.upper())) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral + else: + isequal = (lambda a, b: a == b) + masks = (lambda a, b: b.startswith(a)) + parseElementClass = Keyword if asKeyword else Literal + + symbols = [] + if isinstance(strs, basestring): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + warnings.warn("Invalid argument to oneOf, expected string or iterable", + SyntaxWarning, stacklevel=2) + if not symbols: + return NoMatch() + + if not asKeyword: + # if not producing keywords, need to reorder to take care to avoid masking + # longer choices with shorter ones + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1:]): + if isequal(other, cur): + del symbols[i + j + 1] + break + elif masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if not (caseless or asKeyword) and useRegex: + # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols])) + try: + if len(symbols) == len("".join(symbols)): + return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols)) + else: + return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols)) + except Exception: + warnings.warn("Exception creating Regex for oneOf, building MatchFirst", + SyntaxWarning, stacklevel=2) + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) + +def dictOf(key, value): + """Helper to easily and clearly define a dictionary by specifying + the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. 
The key pattern
+    can include delimiting markers or punctuation, as long as they are
+    suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the :class:`Dict` results
+    can include named token fields.
+
+    Example::
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)        # object attribute access works too
+        print(result.asDict())
+
+    prints::
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict(OneOrMore(Group(key + value)))
+
+def originalTextFor(expr, asString=True):
+    """Helper to return the original, untokenized text for a given
+    expression.  Useful to restore the parsed fields of an HTML start
+    tag into the raw tag text itself, or to revert separate tokens with
+    intervening whitespace back to the original matching input text. By
+    default, returns a string containing the original parsed text.
+
+    If the optional ``asString`` argument is passed as
+    ``False``, then the return value is
+    a :class:`ParseResults` containing any results names that
+    were originally matched, and a single token containing the original
+    matched text from the input string. 
So if the expression passed to + :class:`originalTextFor` contains expressions with defined + results names, you must set ``asString`` to ``False`` if you + want to preserve those results name values. + + Example:: + + src = "this is test <b> bold <i>text</i> </b> normal text " + for tag in ("b", "i"): + opener, closer = makeHTMLTags(tag) + patt = originalTextFor(opener + SkipTo(closer) + closer) + print(patt.searchString(src)[0]) + + prints:: + + ['<b> bold <i>text</i> </b>'] + ['<i>text</i>'] + """ + locMarker = Empty().setParseAction(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start: t._original_end] + else: + def extractText(s, l, t): + t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] + matchExpr.setParseAction(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + return matchExpr + +def ungroup(expr): + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. + """ + return TokenConverter(expr).addParseAction(lambda t: t[0]) + +def locatedExpr(expr): + """Helper to decorate a returned token with its starting and ending + locations in the input string. 
+ + This helper adds the following results names: + + - locn_start = location where matched expression begins + - locn_end = location where matched expression ends + - value = the actual parsed results + + Be careful if the input text contains ``<TAB>`` characters, you + may want to call :class:`ParserElement.parseWithTabs` + + Example:: + + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().setParseAction(lambda s, l, t: l) + return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) + + +# convenience constants for positional expressions +empty = Empty().setName("empty") +lineStart = LineStart().setName("lineStart") +lineEnd = LineEnd().setName("lineEnd") +stringStart = StringStart().setName("stringStart") +stringEnd = StringEnd().setName("stringEnd") + +_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1]) +_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16))) +_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8))) +_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) +_charRange = Group(_singleChar + Suppress("-") + _singleChar) +_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]" + +def srange(s): + r"""Helper to easily define string ranges for use in Word + construction. 
Borrows syntax from regexp '[]' string range + definitions:: + + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + + The input string must be enclosed in []'s, and the returned string + is the expanded character set joined into a single string. The + values enclosed in the []'s may be: + + - a single character + - an escaped character with a leading backslash (such as ``\-`` + or ``\]``) + - an escaped hex character with a leading ``'\x'`` + (``\x21``, which is a ``'!'`` character) (``\0x##`` + is also supported for backwards compatibility) + - an escaped octal character with a leading ``'\0'`` + (``\041``, which is a ``'!'`` character) + - a range of any of the above, separated by a dash (``'a-z'``, + etc.) + - any combination of the above (``'aeiouy'``, + ``'a-zA-Z0-9_$'``, etc.) + """ + _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) + except Exception: + return "" + +def matchOnlyAtCol(n): + """Helper method for defining parse actions that require matching at + a specific column in the input text. + """ + def verifyCol(strg, locn, toks): + if col(locn, strg) != n: + raise ParseException(strg, locn, "matched token not at column %d" % n) + return verifyCol + +def replaceWith(replStr): + """Helper method for common parse actions that simply return + a literal value. Especially useful when used with + :class:`transformString<ParserElement.transformString>` (). 
+ + Example:: + + num = Word(nums).setParseAction(lambda toks: int(toks[0])) + na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) + term = na | num + + OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s, l, t: [replStr] + +def removeQuotes(s, l, t): + """Helper parse action for removing quotation marks from parsed + quoted strings. + + Example:: + + # by default, quotation marks are included in parsed results + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] + + # use removeQuotes to strip quotation marks from parsed results + quotedString.setParseAction(removeQuotes) + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] + """ + return t[0][1:-1] + +def tokenMap(func, *args): + """Helper to define a parse action by mapping a function to all + elements of a ParseResults list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``, + which will convert the parsed data to an integer using base 16. 
+ + Example (compare the last to example in :class:`ParserElement.transformString`:: + + hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) + hex_ints.runTests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).setParseAction(tokenMap(str.upper)) + OneOrMore(upperword).runTests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).setParseAction(tokenMap(str.title)) + OneOrMore(wd).setParseAction(' '.join).runTests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + pa.__name__ = func_name + + return pa + +upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) +"""(Deprecated) Helper parse action to convert tokens to upper case. +Deprecated in favor of :class:`pyparsing_common.upcaseTokens`""" + +downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) +"""(Deprecated) Helper parse action to convert tokens to lower case. 
+Deprecated in favor of :class:`pyparsing_common.downcaseTokens`""" + +def _makeTags(tagStr, xml, + suppress_LT=Suppress("<"), + suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr, basestring): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes) + openTag = (suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') + + suppress_GT) + else: + tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">") + openTag = (suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) + + Optional(Suppress("=") + tagAttrValue)))) + + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/') + + suppress_GT) + closeTag = Combine(_L("</") + tagStr + ">", adjacent=False) + + openTag.setName("<%s>" % resname) + # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels + openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy())) + closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname) + openTag.tag = resname + closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) + return openTag, closeTag + +def makeHTMLTags(tagStr): + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. 
+ + Example:: + + text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' + # makeHTMLTags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple + a, a_end = makeHTMLTags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.searchString(text): + # attributes in the <A> tag (like "href" shown here) are + # also accessible as named results + print(link.link_text, '->', link.href) + + prints:: + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki + """ + return _makeTags(tagStr, False) + +def makeXMLTags(tagStr): + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. + + Example: similar to :class:`makeHTMLTags` + """ + return _makeTags(tagStr, True) + +def withAttribute(*args, **attrDict): + """Helper to create a validating parse action to be used with start + tags created with :class:`makeXMLTags` or + :class:`makeHTMLTags`. Use ``withAttribute`` to qualify + a starting tag with a required attribute value, to avoid false + matches on common tags such as ``<TD>`` or ``<DIV>``. + + Call ``withAttribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. + + If just testing for ``class`` (with or without a namespace), use + :class:`withClass`. + + To verify that the attribute exists, but without specifying a value, + pass ``withAttribute.ANY_VALUE`` as the value. 
+ + Example:: + + html = ''' + <div> + Some text + <div type="grid">1 4 0 1 0</div> + <div type="graph">1,3 2,3 1,1</div> + <div>this has no type</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k, v) for k, v in attrs] + def pa(s, l, tokens): + for attrName, attrValue in attrs: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa +withAttribute.ANY_VALUE = object() + +def withClass(classname, namespace=''): + """Simplified version of :class:`withAttribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. 
+ + Example:: + + html = ''' + <div> + Some text + <div class="grid">1 4 0 1 0</div> + <div class="graph">1,3 2,3 1,1</div> + <div>this <div> has no class</div> + </div> + + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr: classname}) + +opAssoc = SimpleNamespace() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')): + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infixNotation. See + :class:`ParserElement.enablePackrat` for a mechanism to potentially + improve your parser performance. 
+ + Parameters: + - baseExpr - expression representing the most basic element for the + nested + - opList - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(opExpr, + numTerms, rightLeftAssoc, parseAction)``, where: + + - opExpr is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if numTerms + is 3, opExpr is a tuple of two expressions, for the two + operators separating the 3 terms + - numTerms is the number of terms for this operator (must be 1, + 2, or 3) + - rightLeftAssoc is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``opAssoc.RIGHT`` and ``opAssoc.LEFT``. + - parseAction is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``setParseAction(*fn)`` + (:class:`ParserElement.setParseAction`) + - lpar - expression for matching left-parentheses + (default= ``Suppress('(')``) + - rpar - expression for matching right-parentheses + (default= ``Suppress(')')``) + + Example:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + + prints:: + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.tryParse(instring, loc) + return 
loc, [] + + ret = Forward() + lastExpr = baseExpr | (lpar + ret + rpar) + for i, operDef in enumerate(opList): + opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr + if arity == 3: + if opExpr is None or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions") + opExpr1, opExpr2 = opExpr + thisExpr = Forward().setName(termName) + if rightLeftAssoc == opAssoc.LEFT: + if arity == 1: + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr)) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr)) + else: + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr)) + elif arity == 3: + matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + elif rightLeftAssoc == opAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Optional): + opExpr = Optional(opExpr) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr)) + else: + matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr)) + elif arity == 3: + matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= 
(matchExpr.setName(termName) | lastExpr) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of :class:`infixNotation`, will be +dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' + | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """Helper method for defining nested lists enclosed in opening and + closing delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - closer - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - content - expression for items within the nested lists + (default= ``None``) + - ignoreExpr - expression for ignoring opening and closing + delimiters (default= :class:`quotedString`) + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignoreExpr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quotedString or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. 
The default is + :class:`quotedString`, but if no expressions are to be ignored, then + pass ``None`` for this argument. + + Example:: + + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + + prints:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener, basestring) and isinstance(closer, basestring): + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener + + closer + + ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).setParseAction(lambda t: t[0].strip())) + else: + content = (empty.copy() + CharsNotIn(opener + + closer + + ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t: t[0].strip())) + else: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) + ).setParseAction(lambda t: t[0].strip())) + else: + content = (Combine(OneOrMore(~Literal(opener) + + ~Literal(closer) + + 
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)) + ).setParseAction(lambda t: t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if no content expression is given") + ret = Forward() + if ignoreExpr is not None: + ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)) + else: + ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.setName('nested %s%s expression' % (opener, closer)) + return ret + +def indentedBlock(blockStatementExpr, indentStack, indent=True): + """Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + Parameters: + + - blockStatementExpr - expression defining syntax of statement that + is repeated within the indented block + - indentStack - list created by caller to manage indentation stack + (multiple statementWithIndentedBlock expressions within a single + grammar should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond + the current level; set to False for block of left-most + statements (default= ``True``) + + A valid block must contain at least one ``blockStatement``. 
+ + Example:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group(funcDecl + func_body) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << (funcDef | assignment | identifier) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + + prints:: + + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + backup_stack = indentStack[:] + + def reset_stack(): + indentStack[:] = backup_stack + + def checkPeerIndent(s, l, t): + if l >= len(s): return + curCol = col(l, s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseException(s, l, "illegal nesting") + raise ParseException(s, l, "not a peer entry") + + def checkSubIndent(s, l, t): + curCol = col(l, s) + if curCol > indentStack[-1]: + indentStack.append(curCol) + else: + raise ParseException(s, l, "not a subentry") + + def checkUnindent(s, l, t): + if l >= len(s): return + curCol = col(l, s) + if not(indentStack and curCol in indentStack): + raise ParseException(s, l, "not an unindent") + if curCol < indentStack[-1]: + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), 
stopOn=StringEnd()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group(Optional(NL) + + INDENT + + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) + + UNDENT) + else: + smExpr = Group(Optional(NL) + + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()) + + UNDENT) + smExpr.setFailAction(lambda a, b, c, d: reset_stack()) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\'')) +commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form ``/* ... */``" + +htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") +"Comment of the form ``<!-- ... -->``" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form ``// ... 
(to end of line)``" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") +"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" + +javaStyleComment = cppStyleComment +"Same as :class:`cppStyleComment`" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form ``# ... (to end of line)``" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional(Word(" \t") + + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") +commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or +quoted strings, separated by commas. + +This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`. +""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers<integer>`, :class:`reals<real>`, + :class:`scientific notation<sci_real>`) + - common :class:`programming identifiers<identifier>` + - network addresses (:class:`MAC<mac_address>`, + :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`) + - ISO8601 :class:`dates<iso8601_date>` and + :class:`datetime<iso8601_datetime>` + - :class:`UUID<uuid>` + - :class:`comma-separated list<comma_separated_list>` + + Parse actions: + + - :class:`convertToInteger` + - :class:`convertToFloat` + - :class:`convertToDate` + - :class:`convertToDatetime` + - :class:`stripHTMLTags` + - :class:`upcaseTokens` + - :class:`downcaseTokens` + + Example:: + + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + 
pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16)) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + 
signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a float""" + + sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional + scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas + '_', alphanums + '_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) + + "::" + + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6)) + ).setName("short IPv6 address") + 
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") + "ISO8601 date 
(``yyyy-mm-dd``)" + + iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>' + td, td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + print(table_text.parseString(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + + ~LineEnd() + + Word(printables, excludeChars=',') + + Optional(White(" \t")))).streamline().setName("commaItem") + comma_separated_list = delimitedList(Optional(quotedString.copy() + | _commasepitem, default='') + ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +class _lazyclassproperty(object): + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls 
is None: + cls = type(obj) + if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) + for superclass in cls.__mro__[1:]): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +class unicode_set(object): + """ + A set of Unicode characters, for language-specific strings for + ``alphas``, ``nums``, ``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``, such as:: + + _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + _ranges = [] + + @classmethod + def _get_chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in cc._ranges: + ret.extend(range(rr[0], rr[-1] + 1)) + return [unichr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + "all non-whitespace characters in this range" + return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def alphas(cls): + "all alphabetic characters in this range" + return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def nums(cls): + "all numeric digit characters in this range" + return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges())) + + @_lazyclassproperty + def alphanums(cls): + "all alphanumeric characters in this range" + return cls.alphas + cls.nums + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
+ """ + _ranges = [(32, sys.maxunicode)] + + class Latin1(unicode_set): + "Unicode set for Latin-1 Unicode Character Range" + _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),] + + class LatinA(unicode_set): + "Unicode set for Latin-A Unicode Character Range" + _ranges = [(0x0100, 0x017f),] + + class LatinB(unicode_set): + "Unicode set for Latin-B Unicode Character Range" + _ranges = [(0x0180, 0x024f),] + + class Greek(unicode_set): + "Unicode set for Greek Unicode Character Ranges" + _ranges = [ + (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d), + (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4), + (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe), + ] + + class Cyrillic(unicode_set): + "Unicode set for Cyrillic Unicode Character Range" + _ranges = [(0x0400, 0x04ff)] + + class Chinese(unicode_set): + "Unicode set for Chinese Unicode Character Range" + _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),] + + class Japanese(unicode_set): + "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" + _ranges = [] + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),] + + class Hiragana(unicode_set): + "Unicode set for Hiragana Unicode Character Range" + _ranges = [(0x3040, 0x309f),] + + class Katakana(unicode_set): + "Unicode set for Katakana Unicode Character Range" + _ranges = [(0x30a0, 0x30ff),] + + class Korean(unicode_set): + "Unicode set for Korean Unicode Character Range" + _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),] + + class CJK(Chinese, Japanese, Korean): + "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" + pass + + class Thai(unicode_set): + "Unicode set for Thai Unicode Character Range" + _ranges = 
[(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),] + + class Arabic(unicode_set): + "Unicode set for Arabic Unicode Character Range" + _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),] + + class Hebrew(unicode_set): + "Unicode set for Hebrew Unicode Character Range" + _ranges = [(0x0590, 0x05ff),] + + class Devanagari(unicode_set): + "Unicode set for Devanagari Unicode Character Range" + _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)] + +pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges + + pyparsing_unicode.Japanese.Hiragana._ranges + + pyparsing_unicode.Japanese.Katakana._ranges) + +# define ranges in language character sets +if PY_3: + setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic) + setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese) + setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic) + setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek) + setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew) + setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese) + setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji) + setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana) + setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana) + setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean) + setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai) + setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari) + + +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - default whitespace characters. 
+ - default keyword characters + - literal string auto-conversion class + - __diag__ settings + + Example: + with reset_pyparsing_context(): + # test that literals used to construct a grammar are automatically suppressed + ParserElement.inlineLiteralsUsing(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] + ')') + + # assert that the '()' characters are not included in the parsed tokens + self.assertParseAndCheckLisst(group, "(abc 123 def)", ['abc', '123', 'def']) + + # after exiting context manager, literals are converted to Literal expressions again + """ + + def __init__(self): + self._save_context = {} + + def save(self): + self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS + self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS + self._save_context[ + "literal_string_class" + ] = ParserElement._literalStringClass + self._save_context["packrat_enabled"] = ParserElement._packratEnabled + self._save_context["packrat_parse"] = ParserElement._parse + self._save_context["__diag__"] = { + name: getattr(__diag__, name) for name in __diag__._all_names + } + self._save_context["__compat__"] = { + "collect_all_And_tokens": __compat__.collect_all_And_tokens + } + return self + + def restore(self): + # reset pyparsing global state + if ( + ParserElement.DEFAULT_WHITE_CHARS + != self._save_context["default_whitespace"] + ): + ParserElement.setDefaultWhitespaceChars( + self._save_context["default_whitespace"] + ) + Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] + ParserElement.inlineLiteralsUsing( + self._save_context["literal_string_class"] + ) + for name, value in self._save_context["__diag__"].items(): + setattr(__diag__, name, value) + ParserElement._packratEnabled = self._save_context["packrat_enabled"] + ParserElement._parse = self._save_context["packrat_parse"] + __compat__.collect_all_And_tokens = self._save_context["__compat__"] + + def __enter__(self): + return 
self.save() + + def __exit__(self, *args): + return self.restore() + + class TestParseResultsAsserts: + """ + A mixin class to add parse results assertion methods to normal unittest.TestCase classes. + """ + def assertParseResultsEquals( + self, result, expected_list=None, expected_dict=None, msg=None + ): + """ + Unit test assertion to compare a ParseResults object with an optional expected_list, + and compare any defined results names with an optional expected_dict. + """ + if expected_list is not None: + self.assertEqual(expected_list, result.asList(), msg=msg) + if expected_dict is not None: + self.assertEqual(expected_dict, result.asDict(), msg=msg) + + def assertParseAndCheckList( + self, expr, test_string, expected_list, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ParseResults.asList() is equal to the expected_list. + """ + result = expr.parseString(test_string, parseAll=True) + if verbose: + print(result.dump()) + self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) + + def assertParseAndCheckDict( + self, expr, test_string, expected_dict, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ParseResults.asDict() is equal to the expected_dict. + """ + result = expr.parseString(test_string, parseAll=True) + if verbose: + print(result.dump()) + self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) + + def assertRunTestResults( + self, run_tests_report, expected_parse_results=None, msg=None + ): + """ + Unit test assertion to evaluate output of ParserElement.runTests(). If a list of + list-dict tuples is given as the expected_parse_results argument, then these are zipped + with the report tuples returned by runTests and evaluated using assertParseResultsEquals. + Finally, asserts that the overall runTests() success value is True. 
+ + :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests + :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] + """ + run_test_success, run_test_results = run_tests_report + + if expected_parse_results is not None: + merged = [ + (rpt[0], rpt[1], expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next( + (exp for exp in expected if isinstance(exp, str)), None + ) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None + ) + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None + ) + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, + ) + else: + # warning here maybe? 
+ print("no validation for {!r}".format(test_string)) + + # do this last, in case some specific test results can be reported instead + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + + @contextmanager + def assertRaisesParseException(self, exc_type=ParseException, msg=None): + with self.assertRaises(exc_type, msg=msg): + yield + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" 
+ 12345678-1234-5678-1234-567812345678 + """) diff --git a/lib/python_libtorrent/__init__.py b/lib/python_libtorrent/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/0.16.19/__init__.py b/lib/python_libtorrent/android_armv7/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index 84e6fea8..00000000 --- a/lib/python_libtorrent/android_armv7/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6804840 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/1.0.6/__init__.py b/lib/python_libtorrent/android_armv7/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/1.0.6/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/1.0.6/libtorrent.so.size.txt deleted file mode 100644 index 6de27f94..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6545732 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/1.0.7/__init__.py b/lib/python_libtorrent/android_armv7/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index 2f7afd24..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6519704 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/1.0.8/__init__.py b/lib/python_libtorrent/android_armv7/1.0.8/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.8/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/1.0.8/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/1.0.8/libtorrent.so.size.txt deleted file mode 100644 index fce8b958..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.8/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6513752 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/1.0.9/__init__.py b/lib/python_libtorrent/android_armv7/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 36119234..00000000 --- a/lib/python_libtorrent/android_armv7/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6518156 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/1.1.0/__init__.py b/lib/python_libtorrent/android_armv7/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index 5610ac18..00000000 --- a/lib/python_libtorrent/android_armv7/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4608320 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/1.1.1/__init__.py b/lib/python_libtorrent/android_armv7/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_armv7/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/android_armv7/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index cc84d962..00000000 --- a/lib/python_libtorrent/android_armv7/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4628960 \ No newline at end of file diff --git a/lib/python_libtorrent/android_armv7/__init__.py b/lib/python_libtorrent/android_armv7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_armv7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/0.16.19/__init__.py b/lib/python_libtorrent/android_x86/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index 559db63b..00000000 --- a/lib/python_libtorrent/android_x86/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -7145572 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/1.0.6/__init__.py b/lib/python_libtorrent/android_x86/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/1.0.6/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/1.0.6/libtorrent.so.size.txt deleted file mode 100644 index 4f5d95fc..00000000 --- a/lib/python_libtorrent/android_x86/1.0.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6921524 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/1.0.7/__init__.py b/lib/python_libtorrent/android_x86/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index 623c5ec5..00000000 --- a/lib/python_libtorrent/android_x86/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6890380 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/1.0.8/__init__.py b/lib/python_libtorrent/android_x86/1.0.8/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/1.0.8/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/1.0.8/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/1.0.8/libtorrent.so.size.txt deleted file mode 100644 index 4ae02ffd..00000000 --- a/lib/python_libtorrent/android_x86/1.0.8/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6889512 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/1.0.9/__init__.py b/lib/python_libtorrent/android_x86/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index ebea3968..00000000 --- a/lib/python_libtorrent/android_x86/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6889548 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/1.1.0/__init__.py b/lib/python_libtorrent/android_x86/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index e29817fc..00000000 --- a/lib/python_libtorrent/android_x86/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -5369280 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/1.1.1/__init__.py b/lib/python_libtorrent/android_x86/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/android_x86/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/android_x86/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index 87c887ad..00000000 --- a/lib/python_libtorrent/android_x86/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -5393984 \ No newline at end of file diff --git a/lib/python_libtorrent/android_x86/__init__.py b/lib/python_libtorrent/android_x86/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/android_x86/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/darwin/0.16.19/__init__.py b/lib/python_libtorrent/darwin/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/darwin/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/darwin/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/darwin/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index f36331b7..00000000 --- a/lib/python_libtorrent/darwin/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -10425648 \ No newline at end of file diff --git a/lib/python_libtorrent/darwin/1.0.9/__init__.py b/lib/python_libtorrent/darwin/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/darwin/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/darwin/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/darwin/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 39bbc11f..00000000 --- a/lib/python_libtorrent/darwin/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3578388 \ No newline at end of file diff --git a/lib/python_libtorrent/darwin/1.1.0/__init__.py b/lib/python_libtorrent/darwin/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/darwin/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/darwin/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/darwin/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index ffcb02f5..00000000 --- a/lib/python_libtorrent/darwin/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -5292640 \ No newline at end of file diff --git a/lib/python_libtorrent/darwin/1.1.1/__init__.py b/lib/python_libtorrent/darwin/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/darwin/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/darwin/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/darwin/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index 8b51a1f3..00000000 --- a/lib/python_libtorrent/darwin/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -5331516 \ No newline at end of file diff --git a/lib/python_libtorrent/darwin/__init__.py b/lib/python_libtorrent/darwin/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/darwin/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/ios_arm/1.0.7/__init__.py b/lib/python_libtorrent/ios_arm/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/ios_arm/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/ios_arm/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/ios_arm/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index 7ab22f29..00000000 --- a/lib/python_libtorrent/ios_arm/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -8834480 \ No newline at end of file diff --git a/lib/python_libtorrent/ios_arm/1.0.8/__init__.py b/lib/python_libtorrent/ios_arm/1.0.8/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/ios_arm/1.0.8/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/ios_arm/1.0.8/libtorrent.so.size.txt b/lib/python_libtorrent/ios_arm/1.0.8/libtorrent.so.size.txt deleted file mode 100644 index 7fc5097b..00000000 --- a/lib/python_libtorrent/ios_arm/1.0.8/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -8779608 \ No newline at end of file diff --git a/lib/python_libtorrent/ios_arm/1.0.9/__init__.py b/lib/python_libtorrent/ios_arm/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/ios_arm/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/ios_arm/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/ios_arm/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 3fdc1bf1..00000000 --- a/lib/python_libtorrent/ios_arm/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -8831856 \ No newline at end of file diff --git a/lib/python_libtorrent/ios_arm/1.1.1/__init__.py b/lib/python_libtorrent/ios_arm/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/ios_arm/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/ios_arm/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/ios_arm/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index bee1908e..00000000 --- a/lib/python_libtorrent/ios_arm/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3439420 \ No newline at end of file diff --git a/lib/python_libtorrent/ios_arm/__init__.py b/lib/python_libtorrent/ios_arm/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/ios_arm/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/__init__.py b/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index 8bf018bb..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -5191944 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_aarch64_ucs2/__init__.py b/lib/python_libtorrent/linux_aarch64_ucs2/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs2/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.0/__init__.py b/lib/python_libtorrent/linux_aarch64_ucs4/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_aarch64_ucs4/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index 8bf018bb..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -5191944 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.1/__init__.py b/lib/python_libtorrent/linux_aarch64_ucs4/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/linux_aarch64_ucs4/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index ef8c5644..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs4/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2884768 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_aarch64_ucs4/__init__.py b/lib/python_libtorrent/linux_aarch64_ucs4/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_aarch64_ucs4/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/0.16.19/__init__.py b/lib/python_libtorrent/linux_armv6/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index 9d285b23..00000000 --- a/lib/python_libtorrent/linux_armv6/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2133072 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.0.11/__init__.py b/lib/python_libtorrent/linux_armv6/1.0.11/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.11/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.0.11/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.0.11/libtorrent.so.size.txt deleted file mode 100644 index 9a1d6002..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.11/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2286476 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.0.6/__init__.py b/lib/python_libtorrent/linux_armv6/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.0.6/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.0.6/libtorrent.so.size.txt deleted file mode 100644 index 62a4af3f..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -1979232 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.0.7/__init__.py b/lib/python_libtorrent/linux_armv6/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index 5ff46c86..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2286424 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.0.9/__init__.py b/lib/python_libtorrent/linux_armv6/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 5ff46c86..00000000 --- a/lib/python_libtorrent/linux_armv6/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2286424 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.1.0/__init__.py b/lib/python_libtorrent/linux_armv6/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index 56d0afee..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2388664 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.1.1/__init__.py b/lib/python_libtorrent/linux_armv6/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index e97c6282..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2577640 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.1.6/__init__.py b/lib/python_libtorrent/linux_armv6/1.1.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.1.6/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.1.6/libtorrent.so.size.txt deleted file mode 100644 index 8df81141..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3630144 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/1.1.7/__init__.py b/lib/python_libtorrent/linux_armv6/1.1.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv6/1.1.7/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv6/1.1.7/libtorrent.so.size.txt deleted file mode 100644 index 0e905c96..00000000 --- a/lib/python_libtorrent/linux_armv6/1.1.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3634324 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv6/__init__.py b/lib/python_libtorrent/linux_armv6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/0.16.19/__init__.py b/lib/python_libtorrent/linux_armv7/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv7/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index 375fc94c..00000000 --- a/lib/python_libtorrent/linux_armv7/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -1892840 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv7/1.0.6/__init__.py b/lib/python_libtorrent/linux_armv7/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/1.0.6/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv7/1.0.6/libtorrent.so.size.txt deleted file mode 100644 index 98acdd6d..00000000 --- a/lib/python_libtorrent/linux_armv7/1.0.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2048268 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv7/1.0.7/__init__.py b/lib/python_libtorrent/linux_armv7/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv7/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index 7cdb4776..00000000 --- a/lib/python_libtorrent/linux_armv7/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2093128 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv7/1.0.9/__init__.py b/lib/python_libtorrent/linux_armv7/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv7/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index bb85f14f..00000000 --- a/lib/python_libtorrent/linux_armv7/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2043172 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv7/1.1.0/__init__.py b/lib/python_libtorrent/linux_armv7/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv7/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index 0866378e..00000000 --- a/lib/python_libtorrent/linux_armv7/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2662156 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv7/1.1.1/__init__.py b/lib/python_libtorrent/linux_armv7/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_armv7/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/linux_armv7/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index 4a58671f..00000000 --- a/lib/python_libtorrent/linux_armv7/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -2674612 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_armv7/__init__.py b/lib/python_libtorrent/linux_armv7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_armv7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mips/1.0.9/__init__.py b/lib/python_libtorrent/linux_mips/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mips/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mips/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_mips/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 476b9bff..00000000 --- a/lib/python_libtorrent/linux_mips/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4598636 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_mips/__init__.py b/lib/python_libtorrent/linux_mips/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mips/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mipsel_ucs2/1.0.9/__init__.py b/lib/python_libtorrent/linux_mipsel_ucs2/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs2/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mipsel_ucs2/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_mipsel_ucs2/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 314f7bd9..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs2/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4596396 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_mipsel_ucs2/1.1.0/__init__.py b/lib/python_libtorrent/linux_mipsel_ucs2/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs2/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mipsel_ucs2/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_mipsel_ucs2/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index faeab6c5..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs2/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6325240 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_mipsel_ucs2/__init__.py b/lib/python_libtorrent/linux_mipsel_ucs2/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs2/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/__init__.py b/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 476b9bff..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs4/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4598636 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_mipsel_ucs4/1.1.0/__init__.py b/lib/python_libtorrent/linux_mipsel_ucs4/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs4/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_mipsel_ucs4/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_mipsel_ucs4/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index faeab6c5..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs4/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6325240 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_mipsel_ucs4/__init__.py b/lib/python_libtorrent/linux_mipsel_ucs4/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_mipsel_ucs4/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/0.16.19/__init__.py b/lib/python_libtorrent/linux_x86/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index a346659a..00000000 --- a/lib/python_libtorrent/linux_x86/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6257605 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86/1.0.6/__init__.py b/lib/python_libtorrent/linux_x86/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/1.0.6/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86/1.0.6/libtorrent.so.size.txt deleted file mode 100644 index 3e74d111..00000000 --- a/lib/python_libtorrent/linux_x86/1.0.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3517944 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86/1.0.7/__init__.py b/lib/python_libtorrent/linux_x86/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index c7e345be..00000000 --- a/lib/python_libtorrent/linux_x86/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3544068 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86/1.0.9/__init__.py b/lib/python_libtorrent/linux_x86/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 5b7c9ab3..00000000 --- a/lib/python_libtorrent/linux_x86/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3239792 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86/1.1.0/__init__.py b/lib/python_libtorrent/linux_x86/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index 18303722..00000000 --- a/lib/python_libtorrent/linux_x86/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4601280 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86/1.1.1/__init__.py b/lib/python_libtorrent/linux_x86/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index 9f9b66e9..00000000 --- a/lib/python_libtorrent/linux_x86/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6652780 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86/__init__.py b/lib/python_libtorrent/linux_x86/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/0.16.19/__init__.py b/lib/python_libtorrent/linux_x86_64/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86_64/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/0.16.19/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86_64/0.16.19/libtorrent.so.size.txt deleted file mode 100644 index 4b476b5b..00000000 --- a/lib/python_libtorrent/linux_x86_64/0.16.19/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -6620181 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86_64/1.0.6/__init__.py b/lib/python_libtorrent/linux_x86_64/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/1.0.6/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86_64/1.0.6/libtorrent.so.size.txt deleted file mode 100644 index bd538ff2..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.0.6/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3514688 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86_64/1.0.7/__init__.py b/lib/python_libtorrent/linux_x86_64/1.0.7/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.0.7/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/1.0.7/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86_64/1.0.7/libtorrent.so.size.txt deleted file mode 100644 index bea17883..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.0.7/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3576128 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86_64/1.0.9/__init__.py b/lib/python_libtorrent/linux_x86_64/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/1.0.9/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86_64/1.0.9/libtorrent.so.size.txt deleted file mode 100644 index 24691028..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.0.9/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -3290600 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86_64/1.1.0/__init__.py b/lib/python_libtorrent/linux_x86_64/1.1.0/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.1.0/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/1.1.0/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86_64/1.1.0/libtorrent.so.size.txt deleted file mode 100644 index c66f5c33..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.1.0/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4246184 \ No newline at end of file diff --git a/lib/python_libtorrent/linux_x86_64/1.1.1/__init__.py b/lib/python_libtorrent/linux_x86_64/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/linux_x86_64/1.1.1/libtorrent.so.size.txt b/lib/python_libtorrent/linux_x86_64/1.1.1/libtorrent.so.size.txt deleted file mode 100644 index 53ec69f8..00000000 --- a/lib/python_libtorrent/linux_x86_64/1.1.1/libtorrent.so.size.txt +++ /dev/null @@ -1 +0,0 @@ -4263000 \ No newline at end of file diff --git a/lib/python_libtorrent/platform_pulsar.py b/lib/python_libtorrent/platform_pulsar.py deleted file mode 100644 index db827cac..00000000 --- a/lib/python_libtorrent/platform_pulsar.py +++ /dev/null @@ -1,198 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' - -import sys -import os -try: - import xbmc, xbmcaddon - #__settings__ = xbmcaddon.Addon(id='script.module.libtorrent') ### Alfa - #__version__ = __settings__.getAddonInfo('version') ### Alfa - #__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__ ### Alfa - __settings__ = xbmcaddon.Addon(id='plugin.video.kod') ### Alfa - __version__ = '1.1.17' ### Alfa - __plugin__ = "python-libtorrent v.1.1.7" ### Alfa -except: - __plugin__ = "python-libtorrent v.1.1.7" ### Alfa - pass - -def log(msg): - try: - xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE ) - except UnicodeEncodeError: - xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE ) - except: - try: - xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE ) - except: - print msg - -def get_libname(platform): - libname=[] - if platform['system'] in ['darwin', 'linux_x86', 'linux_arm', 'linux_armv6', - 'linux_armv7', 'linux_x86_64', 'ios_arm', - 'linux_mipsel_ucs2', 'linux_mipsel_ucs4', 'linux_aarch64_ucs2', 'linux_aarch64_ucs4']: - libname=['libtorrent.so'] - elif platform['system'] == 'windows': - libname=['libtorrent.pyd'] - elif platform['system'] in ['android_armv7', 'android_x86']: - libname=['libtorrent.so', 'liblibtorrent.so'] - return libname - -def get_platform(): - #__settings__ = xbmcaddon.Addon(id='script.module.libtorrent') - #__version__ = __settings__.getAddonInfo('version') - #__plugin__ = __settings__.getAddonInfo('name') + " v." 
+ __version__ - __language__ = __settings__.getLocalizedString - - if __settings__.getSetting('custom_system').lower() == "true": - system = int(__settings__.getSetting('set_system')) - log('USE CUSTOM SYSTEM: '+__language__(1100+system)) - - ret={} - - if system==0: - ret["os"] = "windows" - ret["arch"] = "x86" - elif system==1: - ret["os"] = "linux" - ret["arch"] = "x86" - elif system==2: - ret["os"] = "linux" - ret["arch"] = "x64" - elif system==3: - ret["os"] = "linux" - ret["arch"] = "armv7" - elif system==4: - ret["os"] = "linux" - ret["arch"] = "armv6" - elif system==5: - ret["os"] = "android" - ret["arch"] = "arm" - elif system==6: - ret["os"] = "android" - ret["arch"] = "x86" - elif system==7: - ret["os"] = "darwin" - ret["arch"] = "x64" - elif system==8: - ret["os"] = "ios" - ret["arch"] = "arm" - elif system==9: - ret["os"] = "ios" - ret["arch"] = "arm" - elif system==10: - ret["os"] = "linux" - ret["arch"] = "mipsel_ucs2" - elif system==11: - ret["os"] = "linux" - ret["arch"] = "mipsel_ucs4" - elif system == 12: - ret["os"] = "linux" - ret["arch"] = "linux_aarch64_ucs2" - elif system == 13: - ret["os"] = "linux" - ret["arch"] = "linux_aarch64_ucs4" - else: - - ret = { - "arch": sys.maxsize > 2 ** 32 and "x64" or "x86", - } - if xbmc.getCondVisibility("system.platform.android"): - ret["os"] = "android" - if "arm" in os.uname()[4] or "aarch64" in os.uname()[4]: - ret["arch"] = "arm" - elif xbmc.getCondVisibility("system.platform.linux"): - ret["os"] = "linux" - uname=os.uname()[4] - if "arm" in uname: - if "armv7" in uname: - ret["arch"] = "armv7" - else: - ret["arch"] = "armv6" - elif "mips" in uname: - if sys.maxunicode > 65536: - ret["arch"] = 'mipsel_ucs4' - else: - ret["arch"] = 'mipsel_ucs2' - elif "aarch64" in uname: - if sys.maxint > 2147483647: #is_64bit_system - if sys.maxunicode > 65536: - ret["arch"] = 'aarch64_ucs4' - else: - ret["arch"] = 'aarch64_ucs2' - else: - ret["arch"] = "armv7" #32-bit userspace - elif 
xbmc.getCondVisibility("system.platform.windows"): - ret["os"] = "windows" - elif xbmc.getCondVisibility("system.platform.osx"): - ret["os"] = "darwin" - elif xbmc.getCondVisibility("system.platform.ios"): - ret["os"] = "ios" - ret["arch"] = "arm" - - ret=get_system(ret) - return ret - -def get_system(ret): - ret["system"] = '' - ret["message"] = ['', ''] - - if ret["os"] == 'windows': - ret["system"] = 'windows' - ret["message"] = ['Windows has static compiled python-libtorrent included.', - 'You should install "script.module.libtorrent" from "MyShows.me Kodi Repo"'] - elif ret["os"] == "linux" and ret["arch"] == "x64": - ret["system"] = 'linux_x86_64' - ret["message"] = ['Linux x64 has not static compiled python-libtorrent included.', - 'You should install it by "sudo apt-get install python-libtorrent"'] - elif ret["os"] == "linux" and ret["arch"] == "x86": - ret["system"] = 'linux_x86' - ret["message"] = ['Linux has static compiled python-libtorrent included but it didn\'t work.', - 'You should install it by "sudo apt-get install python-libtorrent"'] - elif ret["os"] == "linux" and "aarch64" in ret["arch"]: - ret["system"] = 'linux_' + ret["arch"] - ret["message"] = ['Linux has static compiled python-libtorrent included but it didn\'t work.', - 'You should install it by "sudo apt-get install python-libtorrent"'] - elif ret["os"] == "linux" and ("arm" or "mips" in ret["arch"]): - ret["system"] = 'linux_'+ret["arch"] - ret["message"] = ['As far as I know you can compile python-libtorrent for ARMv6-7.', - 'You should search for "OneEvil\'s OpenELEC libtorrent" or use Ace Stream.'] - elif ret["os"] == "android": - if ret["arch"]=='arm': - ret["system"] = 'android_armv7' - else: - ret["system"] = 'android_x86' - ret["message"] = ['Please contact DiMartino on kodi.tv forum. 
We compiled python-libtorrent for Android,', - 'but we need your help with some tests on different processors.'] - elif ret["os"] == "darwin": - ret["system"] = 'darwin' - ret["message"] = ['It is possible to compile python-libtorrent for OS X.', - 'But you would have to do it by yourself, there is some info on github.com.'] - elif ret["os"] == "ios" and ret["arch"] == "arm": - ret["system"] = 'ios_arm' - ret["message"] = ['It is probably NOT possible to compile python-libtorrent for iOS.', - 'But you can use torrent-client control functions.'] - - return ret \ No newline at end of file diff --git a/lib/python_libtorrent/public.py b/lib/python_libtorrent/public.py deleted file mode 100644 index 46afdf0c..00000000 --- a/lib/python_libtorrent/public.py +++ /dev/null @@ -1,83 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' - -import os - -from platform_pulsar import get_libname - -class Public: - def __init__( self ): - self.platforms=[] - self.root=os.path.dirname(__file__) - for dir in os.listdir(self.root): - if os.path.isdir(os.path.join(self.root,dir)): - for subdir in os.listdir(os.path.join(self.root,dir)): - if os.path.isdir(os.path.join(self.root,dir,subdir)): - self.platforms.append({'system':dir, 'version':subdir}) - self._generate_size_file() - - def _generate_size_file( self ): - for platform in self.platforms: - for libname in get_libname(platform): - self.libname=libname - self.platform=platform - self.libdir = os.path.join(self.root, self.platform['system'], self.platform['version']) - self.libpath = os.path.join(self.libdir, self.libname) - self.sizepath=self.libpath+'.size.txt' - self.zipname=self.libname+'.zip' - zippath=os.path.join(self.libdir, self.zipname) - system=platform['system']+'/'+platform['version']+'/' - if os.path.exists(self.libpath): - if not os.path.exists(self.sizepath): - print system+self.libname+' NO SIZE' - self._makezip() - elif not os.path.exists(zippath): - print system+self.libname+' NO ZIP' - self._makezip() - else: - size=str(os.path.getsize(self.libpath)) - size_old=open( self.sizepath, "r" ).read() - if size_old!=size: - print system+self.libname+' NOT EQUAL' - self._makezip() - else: - print system+self.libname+' NO ACTION' - else: - print system+self.libname+' NO LIB' - - def _makezip(self): - open( self.sizepath, "w" ).write( str(os.path.getsize(self.libpath)) ) - os.chdir(self.libdir) - os.system('del %s' % (self.zipname)) - os.system('"C:\\Program Files\\7-Zip\\7z.exe" a %s.zip %s' % - (self.libname, self.libname)) - os.chdir(self.root) - #os.system('"C:\\Program Files\\7-Zip\\7z.exe" a %s.zip %s' % - # (self.platform['system']+os.sep+self.libname, self.platform['system']+os.sep+self.libname)) - -if ( __name__ == "__main__" ): - # start - #TODO: publicate - Public() \ No newline at end of file diff --git 
a/lib/python_libtorrent/python_libtorrent/__init__.py b/lib/python_libtorrent/python_libtorrent/__init__.py deleted file mode 100644 index a0a8485e..00000000 --- a/lib/python_libtorrent/python_libtorrent/__init__.py +++ /dev/null @@ -1,243 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' -from __future__ import absolute_import -#from builtins import str -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int - -from .functions import * -import xbmc, xbmcaddon -import sys -import os -import traceback - -__version__ = '1.1.17' -__plugin__ = "python-libtorrent v.1.1.7" -__root__ = os.path.dirname(os.path.dirname(__file__)) - -libtorrent=None -platform = get_platform() -set_dirname=__root__ -if getSettingAsBool('custom_dirname') and set_dirname: - log('set_dirname:' +str(set_dirname)) - dirname=set_dirname -else: - dirname=set_dirname - -log('dirname: ' +str(dirname)) - -#versions = ['0.16.19', '1.0.6', '1.0.7', '1.0.8', '1.0.9', '1.0.11', '1.1.0', '1.1.1', '1.1.6', '1.1.7', '1.2.2', '1.2.3'] -versions = ['0.16.19', '1.0.6', '1.0.7', '1.0.8', '1.0.9', '1.0.11', '1.1.0', '1.1.1', '1.1.6', '1.1.7', '1.2.2'] -default_path = versions[-1] -set_version = 0 -if getSettingAsBool('custom_version'): - log('set_version:' +str(set_version)+' '+versions[set_version]) - platform['version'] = versions[set_version] -else: - platform['version'] = default_path - -sizefile_path = os.path.join(__root__, platform['system'], platform['version']) -if not os.path.exists(sizefile_path): - log('set_version: no sizefile at %s back to default %s' % (sizefile_path, default_path)) - platform['version'] = default_path - sizefile_path = os.path.join(__root__, platform['system'], platform['version']) - if not os.path.exists(sizefile_path): - log('set_version: no default at %s searching for any version' % sizefile_path) - try: - versions = sorted(os.listdir(os.path.join(__root__, platform['system']))) - except: - versions = [] - for ver in versions: - if not os.path.isdir(os.path.join(__root__, platform['system'], ver)): - versions.remove(ver) - - if len(versions)>0: - platform['version'] = versions[-1] - log('set_version: chose %s out of %s' % (platform['version'], str(versions))) - else: - e = 'die because the folder is empty' - 
log(e) - raise Exception(e) -dest_path = os.path.join(dirname, platform['system'], platform['version']) -sys.path.insert(0, dest_path) - -lm=LibraryManager(dest_path, platform) -if not lm.check_exist(): - ok=lm.download() - xbmc.sleep(2000) - - -log('platform: ' + str(platform)) -if platform['system'] not in ['windows', 'windows_x64']: - log('os: '+str(os.uname())) - log_text = 'ucs4' if sys.maxunicode > 65536 else 'ucs2' - log_text += ' x64' if sys.maxsize > 2147483647 else ' x86' - log(log_text) - -try: - fp = '' - pathname = '' - description = '' - libtorrent = '' - from platformcode import config - - if platform['system'] in ['linux_x86', 'windows', 'windows_x64', 'linux_armv6', 'linux_armv7', - 'linux_x86_64', 'linux_mipsel_ucs2', 'linux_mipsel_ucs4', - 'linux_aarch64_ucs2', 'linux_aarch64_ucs4']: - import libtorrent - - elif PY3 and platform['system'] not in ['android_armv7', 'android_x86']: - import libtorrent - - elif platform['system'] in ['darwin', 'ios_arm']: - import imp - - path_list = [dest_path] - log('path_list = ' + str(path_list)) - fp, pathname, description = imp.find_module('libtorrent', path_list) - log('fp = ' + str(fp)) - log('pathname = ' + str(pathname)) - log('description = ' + str(description)) - try: - libtorrent = imp.load_module('libtorrent', fp, pathname, description) - finally: - if fp: fp.close() - - elif platform['system'] in ['android_armv7', 'android_x86']: - try: - import imp - from ctypes import CDLL - - dest_path=lm.android_workaround(os.path.join(xbmc.translatePath('special://xbmc/'), 'files').replace('/cache/apk/assets', '')) - dll_path=os.path.join(dest_path, 'liblibtorrent.so') - log('CDLL path = ' + dll_path) - liblibtorrent=CDLL(dll_path) - log('CDLL = ' + str(liblibtorrent)) - path_list = [dest_path] - log('path_list = ' + str(path_list)) - fp, pathname, description = imp.find_module('libtorrent', path_list) - log('fp = ' + str(fp)) - log('pathname = ' + str(pathname)) - log('description = ' + str(description)) - try: - 
libtorrent = imp.load_module('libtorrent', fp, pathname, description) - finally: - if fp: fp.close() - except Exception as e: - if not PY3: - e = unicode(str(e), "utf8", errors="replace").encode("utf8") - config.set_setting("libtorrent_path", "", server="torrent") - config.set_setting("libtorrent_error", str(e), server="torrent") - log(traceback.format_exc(1)) - log('fp = ' + str(fp)) - log('pathname = ' + str(pathname)) - log('description = ' + str(description)) - log('Error importing libtorrent from "' + dest_path + '". Exception: ' + str(e)) - if fp: fp.close() - - # If no permission in dest_path we need to go deeper on root! - try: - sys_path = '/data/app/' - fp = '' - pathname = sys_path - dest_path = sys_path - description = '' - libtorrent = '' - LIBTORRENT_MSG = config.get_setting("libtorrent_msg", server="torrent", default='') - if not LIBTORRENT_MSG: - dialog = xbmcgui.Dialog() - dialog.notification('KoD: '+ config.get_localizad_string(70766), config.get_localizad_string(70767), time=15000) - config.set_setting("libtorrent_msg", 'OK', server="torrent") - - from core import scrapertools - kodi_app = xbmc.translatePath('special://xbmc') - kodi_app = scrapertools.find_single_match(kodi_app, '\/\w+\/\w+\/.*?\/(.*?)\/') - kodi_dir = '%s-1' % kodi_app - dir_list = '' - try: - dir_list = os.listdir(sys_path).split() - except: - import subprocess - command = ['su', '-c', 'ls', sys_path] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - log('Comando ROOT: %s' % str(command)) - dir_list = output_cmd.split() - - if not dir_list: - raise - - for file in dir_list: - if kodi_app in file: - kodi_dir = file - break - - bits = sys.maxsize > 2 ** 32 and "64" or "" - dest_path = os.path.join(sys_path, kodi_dir, 'lib', platform['arch'] + bits) - dest_path=lm.android_workaround(new_dest_path=dest_path) - sys.path.insert(0, dest_path) - dll_path=os.path.join(dest_path, 'liblibtorrent.so') - log('NEW CDLL 
path = ' + dll_path) - if not PY3: - liblibtorrent=CDLL(dll_path) - log('CDLL = ' + str(liblibtorrent)) - path_list = [dest_path] - log('path_list = ' + str(path_list)) - fp, pathname, description = imp.find_module('libtorrent', path_list) - try: - libtorrent = imp.load_module('libtorrent', fp, pathname, description) - finally: - if fp: fp.close() - else: - import libtorrent - - except Exception as e: - log('ERROR Comando ROOT: %s, %s' % (str(command), str(dest_path))) - if not PY3: - e = unicode(str(e), "utf8", errors="replace").encode("utf8") - log(traceback.format_exc(1)) - log('fp = ' + str(fp)) - log('pathname = ' + str(pathname)) - log('description = ' + str(description)) - log('Error importing libtorrent from "' + dest_path + '". Exception: ' + str(e)) - if fp: fp.close() - - if libtorrent: - config.set_setting("libtorrent_path", dest_path, server="torrent") - config.set_setting("libtorrent_error", "", server="torrent") - log('Imported libtorrent v' + libtorrent.version + ' from "' + dest_path + '"') - -except Exception as e: - if not PY3: - e = unicode(str(e), "utf8", errors="replace").encode("utf8") - config.set_setting("libtorrent_path", "", server="torrent") - config.set_setting("libtorrent_error", str(e), server="torrent") - log('Error importing libtorrent from "' + dest_path + '". 
Exception: ' + str(e)) - if fp: fp.close() - - -def get_libtorrent(): - return libtorrent diff --git a/lib/python_libtorrent/python_libtorrent/functions.py b/lib/python_libtorrent/python_libtorrent/functions.py deleted file mode 100644 index da479591..00000000 --- a/lib/python_libtorrent/python_libtorrent/functions.py +++ /dev/null @@ -1,187 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' -from __future__ import absolute_import -#from builtins import str -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -from builtins import object - -import os -import xbmc, xbmcgui, xbmcaddon - -from .net import HTTP -from core import filetools ### Alfa - -__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent" -#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent') -#__version__ = __settings__.getAddonInfo('version') -#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__ -#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons', -# 'script.module.libtorrent', 'icon.png') -#__settings__ = xbmcaddon.Addon(id='plugin.video.kod') ### Alfa -__version__ = '1.1.17' ### Alfa -__plugin__ = "python-libtorrent v.1.1.7" ### Alfa -__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons', - 'plugin.video.kod', 'icon.png') ### Alfa -#__language__ = __settings__.getLocalizedString ### Alfa - -#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa -from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa - -def log(msg): - try: - xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE ) - except UnicodeEncodeError: - xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE ) - except: - xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE ) - -def getSettingAsBool(setting): - __settings__ = xbmcaddon.Addon(id='plugin.video.kod') ### Alfa - return __settings__.getSetting(setting).lower() == "true" - -class LibraryManager(object): - def __init__(self, dest_path, platform): - self.dest_path = dest_path - self.platform = platform - self.root=os.path.dirname(os.path.dirname(__file__)) - ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: resto método - try: - ver1 = int(ver1) - ver2 
= int(ver2) - except: - pass - if ver1 >= 1 and ver2 >= 2: - global __libbaseurl__ - #__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent' - __libbaseurl__ = 'https://bitbucket.org/alfa_addon/alfa-repo/raw/master/downloads/libtorrent' - - def check_exist(self): - for libname in get_libname(self.platform): - if not filetools.exists(os.path.join(self.dest_path,libname)): - return False - return True - - def check_update(self): - need_update=False - for libname in get_libname(self.platform): - if libname!='liblibtorrent.so': - self.libpath = os.path.join(self.dest_path, libname) - self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt') - size=str(os.path.getsize(self.libpath)) - size_old=open( self.sizepath, "r" ).read() - if size_old!=size: - need_update=True - return need_update - - def update(self): - if self.check_update(): - for libname in get_libname(self.platform): - self.libpath = os.path.join(self.dest_path, libname) - filetools.remove(self.libpath) - self.download() - - def download(self): - __settings__ = xbmcaddon.Addon(id='plugin.video.kod') ### Alfa - filetools.mkdir(self.dest_path) - for libname in get_libname(self.platform): - dest = os.path.join(self.dest_path, libname) - log("try to fetch %s" % libname) - url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname) - if libname!='liblibtorrent.so': - try: - self.http = HTTP() - self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa - log("%s -> %s" % (url, dest)) - xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True) - filetools.remove(dest + ".zip") - except: - text = 'Failed download %s!' 
% libname - xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__)) - else: - filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa - dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \ - 'lib', libname) ### Alfa - filetools.copy(dest, dest_alfa, silent=True) ### Alfa - dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \ - 'custom_code', 'lib', libname) ### Alfa - filetools.copy(dest, dest_alfa, silent=True) ### Alfa - return True - - def android_workaround(self, new_dest_path): ### Alfa (entera) - import subprocess - - for libname in get_libname(self.platform): - libpath=os.path.join(self.dest_path, libname) - size=str(os.path.getsize(libpath)) - new_libpath=os.path.join(new_dest_path, libname) - - if filetools.exists(new_libpath): - new_size=str(os.path.getsize(new_libpath)) - if size != new_size: - filetools.remove(new_libpath) - if filetools.exists(new_libpath): - try: - command = ['su', '-c', 'rm', '%s' % new_libpath] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - log('Comando ROOT: %s' % str(command)) - except: - log('Sin PERMISOS ROOT: %s' % str(command)) - - if not filetools.exists(new_libpath): - log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath)) - - if not filetools.exists(new_libpath): - filetools.copy(libpath, new_libpath, silent=True) ### ALFA - log('Copying... 
%s -> %s' %(libpath, new_libpath)) - - if not filetools.exists(new_libpath): - try: - command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - log('Comando ROOT: %s' % str(command)) - - command = ['su', '-c', 'chmod', '777', '%s' % new_libpath] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - log('Comando ROOT: %s' % str(command)) - except: - log('Sin PERMISOS ROOT: %s' % str(command)) - - if not filetools.exists(new_libpath): - log('ROOT Copy Failed!') - - else: - command = ['chmod', '777', '%s' % new_libpath] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - log('Comando: %s' % str(command)) - else: - log('Module exists. Not copied... %s' % new_libpath) ### ALFA - - return new_dest_path diff --git a/lib/python_libtorrent/python_libtorrent/net.py b/lib/python_libtorrent/python_libtorrent/net.py deleted file mode 100644 index 68519899..00000000 --- a/lib/python_libtorrent/python_libtorrent/net.py +++ /dev/null @@ -1,332 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' -from __future__ import division -from future import standard_library -standard_library.install_aliases() -#from builtins import str -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -from builtins import object -from past.utils import old_div - -import os -import time -import re -import urllib.request, urllib.parse, urllib.error -# import http.cookiejar -import future.backports.http.cookiejar as http_cookiejar -import base64 - -import xbmc -import xbmcgui -#import xbmcvfs ### Alfa - -RE = { - 'content-disposition': re.compile('attachment;\sfilename="*([^"\s]+)"|\s') -} - -# ################################ -# -# HTTP -# -# ################################ - -class HTTP(object): - def __init__(self): - #self._dirname = xbmc.translatePath('special://temp') ### Alfa - #for subdir in ('xbmcup', 'script.module.libtorrent'): ### Alfa - self._dirname = os.path.dirname(os.path.dirname(__file__)) ### Alfa - #for subdir in ('lib', 'python_libtorrent'): ### Alfa - # self._dirname = os.path.join(self._dirname, subdir) ### Alfa - # if not xbmcvfs.exists(self._dirname): ### Alfa - # xbmcvfs.mkdir(self._dirname) ### Alfa - - def fetch(self, request, **kwargs): - self.con, self.fd, self.progress, self.cookies, self.request = None, None, None, None, request - - if not isinstance(self.request, HTTPRequest): - self.request = HTTPRequest(url=self.request, **kwargs) - - self.response = HTTPResponse(self.request) - - xbmc.log('XBMCup: HTTP: request: ' + 
str(self.request), xbmc.LOGDEBUG) - - try: - self._opener() - self._fetch() - except Exception as e: - xbmc.log('XBMCup: HTTP: ' + str(e), xbmc.LOGERROR) - if isinstance(e, urllib.error.HTTPError): - self.response.code = e.code - self.response.error = e - else: - self.response.code = 200 - - if self.fd: - self.fd.close() - self.fd = None - - if self.con: - self.con.close() - self.con = None - - if self.progress: - self.progress.close() - self.progress = None - - self.response.time = time.time() - self.response.time - - xbmc.log('XBMCup: HTTP: response: ' + str(self.response), xbmc.LOGDEBUG) - - return self.response - - def _opener(self): - - build = [urllib.request.HTTPHandler()] - - if self.request.redirect: - build.append(urllib.request.HTTPRedirectHandler()) - - if self.request.proxy_host and self.request.proxy_port: - build.append(urllib.request.ProxyHandler( - {self.request.proxy_protocol: self.request.proxy_host + ':' + str(self.request.proxy_port)})) - - if self.request.proxy_username: - proxy_auth_handler = urllib.request.ProxyBasicAuthHandler() - proxy_auth_handler.add_password('realm', 'uri', self.request.proxy_username, - self.request.proxy_password) - build.append(proxy_auth_handler) - - if self.request.cookies: - self.request.cookies = os.path.join(self._dirname, self.request.cookies) - self.cookies = http_cookiejar.MozillaCookieJar() - if os.path.isfile(self.request.cookies): - self.cookies.load(self.request.cookies) - build.append(urllib.request.HTTPCookieProcessor(self.cookies)) - - urllib.request.install_opener(urllib.request.build_opener(*build)) - - def _fetch(self): - params = {} if self.request.params is None else self.request.params - - if self.request.upload: - boundary, upload = self._upload(self.request.upload, params) - req = urllib.request.Request(self.request.url) - req.add_data(upload) - else: - - if self.request.method == 'POST': - if isinstance(params, dict) or isinstance(params, list): - params = urllib.parse.urlencode(params) - req 
= urllib.request.Request(self.request.url, params) - else: - req = urllib.request.Request(self.request.url) - - for key, value in self.request.headers.items(): - req.add_header(key, value) - - if self.request.upload: - req.add_header('Content-type', 'multipart/form-data; boundary=%s' % boundary) - req.add_header('Content-length', len(upload)) - - if self.request.auth_username and self.request.auth_password: - req.add_header('Authorization', 'Basic %s' % base64.encodestring( - ':'.join([self.request.auth_username, self.request.auth_password])).strip()) - - self.con = urllib.request.urlopen(req, timeout=self.request.timeout) - # self.con = urllib2.urlopen(req) - self.response.headers = self._headers(self.con.info()) - - if self.request.download: - self._download() - else: - self.response.body = self.con.read() - - if self.request.cookies: - self.cookies.save(self.request.cookies) - - def _download(self): - fd = open(self.request.download, 'wb') - if self.request.progress: - self.progress = xbmcgui.DialogProgress() - self.progress.create(u'Download') - - bs = 1024 * 8 - size = -1 - read = 0 - name = None - - if self.request.progress: - if 'content-length' in self.response.headers: - size = int(self.response.headers['content-length']) - if 'content-disposition' in self.response.headers: - r = RE['content-disposition'].search(self.response.headers['content-disposition']) - if r: - name = urllib.parse.unquote(r.group(1)) - - while 1: - buf = self.con.read(bs) - if not buf: - break - read += len(buf) - fd.write(buf) - - if self.request.progress: - self.progress.update(*self._progress(read, size, name)) - - self.response.filename = self.request.download - - def _upload(self, upload, params): - import mimetools - import itertools - - res = [] - boundary = mimetools.choose_boundary() - part_boundary = '--' + boundary - - if params: - for name, value in params.items(): - res.append([part_boundary, 'Content-Disposition: form-data; name="%s"' % name, '', value]) - - if 
isinstance(upload, dict): - upload = [upload] - - for obj in upload: - name = obj.get('name') - filename = obj.get('filename', 'default') - content_type = obj.get('content-type') - try: - body = obj['body'].read() - except AttributeError: - body = obj['body'] - - if content_type: - res.append([part_boundary, - 'Content-Disposition: file; name="%s"; filename="%s"' % (name, urllib.parse.quote(filename)), - 'Content-Type: %s' % content_type, '', body]) - else: - res.append([part_boundary, - 'Content-Disposition: file; name="%s"; filename="%s"' % (name, urllib.parse.quote(filename)), '', - body]) - - result = list(itertools.chain(*res)) - result.append('--' + boundary + '--') - result.append('') - return boundary, '\r\n'.join(result) - - def _headers(self, raw): - headers = {} - for line in str(raw).split('\n'): - pair = line.split(':', 1) - if len(pair) == 2: - tag = pair[0].lower().strip() - value = pair[1].strip() - if tag and value: - headers[tag] = value - return headers - - def _progress(self, read, size, name): - res = [] - if size < 0: - res.append(1) - else: - res.append(int(float(read) / (float(size) / 100.0))) - if name: - res.append(u'File: ' + name) - if size != -1: - res.append(u'Size: ' + self._human(size)) - res.append(u'Load: ' + self._human(read)) - return res - - def _human(self, size): - human = None - for h, f in (('KB', 1024), ('MB', 1024 * 1024), ('GB', 1024 * 1024 * 1024), ('TB', 1024 * 1024 * 1024 * 1024)): - if old_div(size, f) > 0: - human = h - factor = f - else: - break - if human is None: - return (u'%10.1f %s' % (size, u'byte')).replace(u'.0', u'') - else: - return u'%10.2f %s' % (float(size) / float(factor), human) - - -class HTTPRequest(object): - def __init__(self, url, method='GET', headers=None, cookies=None, params=None, upload=None, download=None, - progress=False, auth_username=None, auth_password=None, proxy_protocol='http', proxy_host=None, - proxy_port=None, proxy_username=None, proxy_password='', timeout=20.0, redirect=True, 
gzip=False): - if headers is None: - headers = {} - - self.url = url - self.method = method - self.headers = headers - - self.cookies = cookies - - self.params = params - - self.upload = upload - self.download = download - self.progress = progress - - self.auth_username = auth_username - self.auth_password = auth_password - - self.proxy_protocol = proxy_protocol - self.proxy_host = proxy_host - self.proxy_port = proxy_port - self.proxy_username = proxy_username - self.proxy_password = proxy_password - - self.timeout = timeout - - self.redirect = redirect - - self.gzip = gzip - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, ','.join('%s=%r' % i for i in self.__dict__.items())) - - -class HTTPResponse(object): - def __init__(self, request): - self.request = request - self.code = None - self.headers = {} - self.error = None - self.body = None - self.filename = None - self.time = time.time() - - def __repr__(self): - args = ','.join('%s=%r' % i for i in self.__dict__.items() if i[0] != 'body') - if self.body: - args += ',body=<data>' - else: - args += ',body=None' - return '%s(%s)' % (self.__class__.__name__, args) diff --git a/lib/python_libtorrent/python_libtorrent/platform_pulsar.py b/lib/python_libtorrent/python_libtorrent/platform_pulsar.py deleted file mode 100644 index 1153b44b..00000000 --- a/lib/python_libtorrent/python_libtorrent/platform_pulsar.py +++ /dev/null @@ -1,208 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - 
the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' -from __future__ import print_function -import sys -PY3 = False -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int - -import os -try: - import xbmc, xbmcaddon - #__settings__ = xbmcaddon.Addon(id='script.module.libtorrent') ### Alfa - #__version__ = __settings__.getAddonInfo('version') ### Alfa - #__plugin__ = __settings__.getAddonInfo('name') + " v." 
+ __version__ ### Alfa - __settings__ = xbmcaddon.Addon(id='plugin.video.kod') ### Alfa - __version__ = '1.1.17' ### Alfa - __plugin__ = "python-libtorrent v.1.1.7" ### Alfa -except: - __plugin__ = "python-libtorrent v.1.1.7" ### Alfa - pass - -def log(msg): - try: - xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE ) - except UnicodeEncodeError: - xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE ) - except: - try: - xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE ) - except: - print(msg) - -def get_libname(platform): - libname=[] - if platform['system'] in ['darwin', 'linux_x86', 'linux_arm', 'linux_armv6', - 'linux_armv7', 'linux_x86_64', 'ios_arm', - 'linux_mipsel_ucs2', 'linux_mipsel_ucs4', 'linux_aarch64_ucs2', 'linux_aarch64_ucs4']: - libname=['libtorrent.so'] - elif platform['system'] in ['windows', 'windows_x64']: ### Alfa - libname=['libtorrent.pyd'] - elif platform['system'] in ['android_armv7', 'android_x86']: - libname=['libtorrent.so', 'liblibtorrent.so'] - return libname - -def get_platform(): - #__settings__ = xbmcaddon.Addon(id='script.module.libtorrent') ### Alfa - #__version__ = __settings__.getAddonInfo('version') ### Alfa - #__plugin__ = __settings__.getAddonInfo('name') + " v." 
+ __version__ ### Alfa - __settings__ = xbmcaddon.Addon(id='plugin.video.kod') ### Alfa - __version__ = '1.1.17' ### Alfa - __plugin__ = "python-libtorrent v.1.1.7" ### Alfa - __language__ = __settings__.getLocalizedString - - if __settings__.getSetting('custom_system').lower() == "true": - system = int(__settings__.getSetting('set_system')) - log('USE CUSTOM SYSTEM: '+__language__(1100+system)) - - ret={} - - if system==0: - ret["os"] = "windows" - ret["arch"] = "x86" - elif system==1: - ret["os"] = "linux" - ret["arch"] = "x86" - elif system==2: - ret["os"] = "linux" - ret["arch"] = "x64" - elif system==3: - ret["os"] = "linux" - ret["arch"] = "armv7" - elif system==4: - ret["os"] = "linux" - ret["arch"] = "armv6" - elif system==5: - ret["os"] = "android" - ret["arch"] = "arm" - elif system==6: - ret["os"] = "android" - ret["arch"] = "x86" - elif system==7: - ret["os"] = "darwin" - ret["arch"] = "x64" - elif system==8: - ret["os"] = "ios" - ret["arch"] = "arm" - elif system==9: - ret["os"] = "ios" - ret["arch"] = "arm" - elif system==10: - ret["os"] = "linux" - ret["arch"] = "mipsel_ucs2" - elif system==11: - ret["os"] = "linux" - ret["arch"] = "mipsel_ucs4" - elif system == 12: - ret["os"] = "linux" - ret["arch"] = "linux_aarch64_ucs2" - elif system == 13: - ret["os"] = "linux" - ret["arch"] = "linux_aarch64_ucs4" - else: - - ret = { - "arch": sys.maxsize > 2 ** 32 and "x64" or "x86", - } - if xbmc.getCondVisibility("system.platform.android"): - ret["os"] = "android" - if "arm" in os.uname()[4] or "aarch64" in os.uname()[4]: - ret["arch"] = "arm" - elif xbmc.getCondVisibility("system.platform.linux"): - ret["os"] = "linux" - uname=os.uname()[4] - if "arm" in uname: - if "armv7" in uname: - ret["arch"] = "armv7" - else: - ret["arch"] = "armv6" - elif "mips" in uname: - if sys.maxunicode > 65536: - ret["arch"] = 'mipsel_ucs4' - else: - ret["arch"] = 'mipsel_ucs2' - elif "aarch64" in uname: - if sys.maxsize > 2147483647: #is_64bit_system - if sys.maxunicode > 
65536: - ret["arch"] = 'aarch64_ucs4' - else: - ret["arch"] = 'aarch64_ucs2' - else: - ret["arch"] = "armv7" #32-bit userspace - elif xbmc.getCondVisibility("system.platform.windows"): - ret["os"] = "windows" - elif xbmc.getCondVisibility("system.platform.osx"): - ret["os"] = "darwin" - elif xbmc.getCondVisibility("system.platform.ios"): - ret["os"] = "ios" - ret["arch"] = "arm" - - ret=get_system(ret) - return ret - -def get_system(ret): - ret["system"] = '' - ret["message"] = ['', ''] - - if ret["os"] == 'windows' and ret["arch"] != "x64": ### Alfa - ret["system"] = 'windows' - ret["message"] = ['Windows has static compiled python-libtorrent included.', - 'You should install "script.module.libtorrent" from "MyShows.me Kodi Repo"'] - elif ret["os"] == 'windows' and ret["arch"] == "x64": ### Alfa - ret["system"] = 'windows_x64' ### Alfa - ret["message"] = ['Windows x64 has static compiled python-libtorrent included.', ### Alfa - 'You should install "script.module.libtorrent" from "MyShows.me Kodi Repo"'] ### Alfa - elif ret["os"] == "linux" and ret["arch"] == "x64": - ret["system"] = 'linux_x86_64' - ret["message"] = ['Linux x64 has not static compiled python-libtorrent included.', - 'You should install it by "sudo apt-get install python-libtorrent"'] - elif ret["os"] == "linux" and ret["arch"] == "x86": - ret["system"] = 'linux_x86' - ret["message"] = ['Linux has static compiled python-libtorrent included but it didn\'t work.', - 'You should install it by "sudo apt-get install python-libtorrent"'] - elif ret["os"] == "linux" and "aarch64" in ret["arch"]: - ret["system"] = 'linux_' + ret["arch"] - ret["message"] = ['Linux has static compiled python-libtorrent included but it didn\'t work.', - 'You should install it by "sudo apt-get install python-libtorrent"'] - elif ret["os"] == "linux" and ("arm" or "mips" in ret["arch"]): - ret["system"] = 'linux_'+ret["arch"] - ret["message"] = ['As far as I know you can compile python-libtorrent for ARMv6-7.', - 'You should 
search for "OneEvil\'s OpenELEC libtorrent" or use Ace Stream.'] - elif ret["os"] == "android": - if ret["arch"]=='arm': - ret["system"] = 'android_armv7' - else: - ret["system"] = 'android_x86' - ret["message"] = ['Please contact DiMartino on kodi.tv forum. We compiled python-libtorrent for Android,', - 'but we need your help with some tests on different processors.'] - elif ret["os"] == "darwin": - ret["system"] = 'darwin' - ret["message"] = ['It is possible to compile python-libtorrent for OS X.', - 'But you would have to do it by yourself, there is some info on github.com.'] - elif ret["os"] == "ios" and ret["arch"] == "arm": - ret["system"] = 'ios_arm' - ret["message"] = ['It is probably NOT possible to compile python-libtorrent for iOS.', - 'But you can use torrent-client control functions.'] - - return ret \ No newline at end of file diff --git a/lib/python_libtorrent/windows/0.16.19/__init__.py b/lib/python_libtorrent/windows/0.16.19/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/0.16.19/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/0.16.19/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/0.16.19/libtorrent.pyd.size.txt deleted file mode 100644 index f7b7ca56..00000000 --- a/lib/python_libtorrent/windows/0.16.19/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -2363904 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/1.0.6/__init__.py b/lib/python_libtorrent/windows/1.0.6/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/1.0.6/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/1.0.6/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/1.0.6/libtorrent.pyd.size.txt deleted file mode 100644 index 3c2fa282..00000000 --- a/lib/python_libtorrent/windows/1.0.6/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -2281472 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/1.0.8/__init__.py b/lib/python_libtorrent/windows/1.0.8/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/1.0.8/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/1.0.8/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/1.0.8/libtorrent.pyd.size.txt deleted file mode 100644 index d073a9fc..00000000 --- a/lib/python_libtorrent/windows/1.0.8/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -2426368 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/1.0.9/__init__.py b/lib/python_libtorrent/windows/1.0.9/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/1.0.9/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/1.0.9/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/1.0.9/libtorrent.pyd.size.txt deleted file mode 100644 index 117a8649..00000000 --- a/lib/python_libtorrent/windows/1.0.9/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -2427392 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/1.1.1/__init__.py b/lib/python_libtorrent/windows/1.1.1/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/1.1.1/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/1.1.1/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/1.1.1/libtorrent.pyd.size.txt deleted file mode 100644 index 9f5f81bd..00000000 --- a/lib/python_libtorrent/windows/1.1.1/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -2671616 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/1.2.2/__init__.py b/lib/python_libtorrent/windows/1.2.2/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/1.2.2/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/1.2.2/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/1.2.2/libtorrent.pyd.size.txt deleted file mode 100644 index 1b2e3d2c..00000000 --- a/lib/python_libtorrent/windows/1.2.2/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -2996736 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/1.2.3/__init__.py b/lib/python_libtorrent/windows/1.2.3/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/1.2.3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows/1.2.3/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows/1.2.3/libtorrent.pyd.size.txt deleted file mode 100644 index 8d654ae7..00000000 --- a/lib/python_libtorrent/windows/1.2.3/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -3049472 \ No newline at end of file diff --git a/lib/python_libtorrent/windows/__init__.py b/lib/python_libtorrent/windows/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows_x64/1.2.2/__init__.py b/lib/python_libtorrent/windows_x64/1.2.2/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows_x64/1.2.2/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-''' diff --git a/lib/python_libtorrent/windows_x64/1.2.2/libtorrent.pyd b/lib/python_libtorrent/windows_x64/1.2.2/libtorrent.pyd deleted file mode 100644 index eb0bf80f..00000000 Binary files a/lib/python_libtorrent/windows_x64/1.2.2/libtorrent.pyd and /dev/null differ diff --git a/lib/python_libtorrent/windows_x64/1.2.2/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows_x64/1.2.2/libtorrent.pyd.size.txt deleted file mode 100644 index 449c75d2..00000000 --- a/lib/python_libtorrent/windows_x64/1.2.2/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -4290048 \ No newline at end of file diff --git a/lib/python_libtorrent/windows_x64/1.2.3/__init__.py b/lib/python_libtorrent/windows_x64/1.2.3/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows_x64/1.2.3/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/python_libtorrent/windows_x64/1.2.3/libtorrent.pyd.size.txt b/lib/python_libtorrent/windows_x64/1.2.3/libtorrent.pyd.size.txt deleted file mode 100644 index 2a4790d3..00000000 --- a/lib/python_libtorrent/windows_x64/1.2.3/libtorrent.pyd.size.txt +++ /dev/null @@ -1 +0,0 @@ -3797504 \ No newline at end of file diff --git a/lib/python_libtorrent/windows_x64/__init__.py b/lib/python_libtorrent/windows_x64/__init__.py deleted file mode 100644 index e0aed70e..00000000 --- a/lib/python_libtorrent/windows_x64/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#-*- coding: utf-8 -*- -''' - python-libtorrent for Kodi (script.module.libtorrent) - Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -''' diff --git a/lib/quasar/daemon.py b/lib/quasar/daemon.py deleted file mode 100644 index 6f1dbe7e..00000000 --- a/lib/quasar/daemon.py +++ /dev/null @@ -1,295 +0,0 @@ -import os -import stat -import time -import xbmc -import shutil -import socket -import urllib2 -import xbmcgui -import threading -import subprocess -from quasar.logger import log -from quasar.osarch import PLATFORM -from quasar.config import QUASARD_HOST -from quasar.addon import ADDON, ADDON_ID, ADDON_PATH -from quasar.util import notify, system_information, getLocalizedString, getWindowsShortPath - -def ensure_exec_perms(file_): - st = os.stat(file_) - os.chmod(file_, st.st_mode | stat.S_IEXEC) - return file_ - -def android_get_current_appid(): - with open("/proc/%d/cmdline" % os.getpid()) as fp: - return fp.read().rstrip("\0") - -def get_quasard_checksum(path): - try: - with open(path) as fp: - fp.seek(-40, os.SEEK_END) # we put a sha1 there - return fp.read() - except Exception: - return "" - -def get_quasar_binary(): - binary = "quasar" + (PLATFORM["os"] == "windows" and ".exe" or "") - - log.info("PLATFORM: %s" % str(PLATFORM)) - binary_dir = os.path.join(ADDON_PATH, "resources", "bin", "%(os)s_%(arch)s" % PLATFORM) - if PLATFORM["os"] == "android": - log.info("Detected binary folder: %s" % binary_dir) - binary_dir_legacy = binary_dir.replace("/storage/emulated/0", "/storage/emulated/legacy") - if os.path.exists(binary_dir_legacy): - binary_dir = binary_dir_legacy - log.info("Using binary folder: %s" % binary_dir) - app_id = android_get_current_appid() - xbmc_data_path = os.path.join("/data", "data", app_id) - - try: #Test if there is any permisions problem - f = open(os.path.join(xbmc_data_path, "test.txt"), "wb") - 
f.write("test") - f.close() - os.remove(os.path.join(xbmc_data_path, "test.txt")) - except: - xbmc_data_path = '' - - if not os.path.exists(xbmc_data_path): - log.info("%s path does not exist, so using %s as xbmc_data_path" % (xbmc_data_path, xbmc.translatePath("special://xbmcbin/"))) - xbmc_data_path = xbmc.translatePath("special://xbmcbin/") - - try: #Test if there is any permisions problem - f = open(os.path.join(xbmc_data_path, "test.txt"), "wb") - f.write("test") - f.close() - os.remove(os.path.join(xbmc_data_path, "test.txt")) - except: - xbmc_data_path = '' - - if not os.path.exists(xbmc_data_path): - log.info("%s path does not exist, so using %s as xbmc_data_path" % (xbmc_data_path, xbmc.translatePath("special://masterprofile/"))) - xbmc_data_path = xbmc.translatePath("special://masterprofile/") - dest_binary_dir = os.path.join(xbmc_data_path, "files", ADDON_ID, "bin", "%(os)s_%(arch)s" % PLATFORM) - else: - dest_binary_dir = os.path.join(xbmc.translatePath(ADDON.getAddonInfo("profile")).decode('utf-8'), "bin", "%(os)s_%(arch)s" % PLATFORM) - - log.info("Using destination binary folder: %s" % dest_binary_dir) - binary_path = os.path.join(binary_dir, binary) - dest_binary_path = os.path.join(dest_binary_dir, binary) - - if not os.path.exists(binary_path): - notify((getLocalizedString(30103) + " %(os)s_%(arch)s" % PLATFORM), time=7000) - system_information() - try: - log.info("Source directory (%s):\n%s" % (binary_dir, os.listdir(os.path.join(binary_dir, "..")))) - log.info("Destination directory (%s):\n%s" % (dest_binary_dir, os.listdir(os.path.join(dest_binary_dir, "..")))) - except Exception: - pass - return False, False - - if os.path.isdir(dest_binary_path): - log.warning("Destination path is a directory, expected previous binary file, removing...") - try: - shutil.rmtree(dest_binary_path) - except Exception as e: - log.error("Unable to remove destination path for update: %s" % e) - system_information() - return False, False - - if not 
os.path.exists(dest_binary_path) or get_quasard_checksum(dest_binary_path) != get_quasard_checksum(binary_path): - log.info("Updating quasar daemon...") - try: - os.makedirs(dest_binary_dir) - except OSError: - pass - try: - shutil.rmtree(dest_binary_dir) - except Exception as e: - log.error("Unable to remove destination path for update: %s" % e) - system_information() - pass - try: - shutil.copytree(binary_dir, dest_binary_dir) - except Exception as e: - log.error("Unable to copy to destination path for update: %s" % e) - system_information() - return False, False - - # Clean stale files in the directory, as this can cause headaches on - # Android when they are unreachable - dest_files = set(os.listdir(dest_binary_dir)) - orig_files = set(os.listdir(binary_dir)) - log.info("Deleting stale files %s" % (dest_files - orig_files)) - for file_ in (dest_files - orig_files): - path = os.path.join(dest_binary_dir, file_) - if os.path.isdir(path): - shutil.rmtree(path) - else: - os.remove(path) - - return dest_binary_dir, ensure_exec_perms(dest_binary_path) - -def clear_fd_inherit_flags(): - # Ensure the spawned quasar binary doesn't inherit open files from Kodi - # which can break things like addon updates. 
[WINDOWS ONLY] - from ctypes import windll - - HANDLE_RANGE = xrange(0, 65536) - HANDLE_FLAG_INHERIT = 1 - FILE_TYPE_DISK = 1 - - for hd in HANDLE_RANGE: - if windll.kernel32.GetFileType(hd) == FILE_TYPE_DISK: - if not windll.kernel32.SetHandleInformation(hd, HANDLE_FLAG_INHERIT, 0): - log.error("Error clearing inherit flag, disk file handle %x" % hd) - - -def jsonrpc_enabled(notify=False): - try: - s = socket.socket() - s.connect(('127.0.0.1', 9090)) - s.close() - log.info("Kodi's JSON-RPC service is available, starting up...") - del s - return True - except Exception as e: - log.error(repr(e)) - if notify: - xbmc.executebuiltin("ActivateWindow(ServiceSettings)") - dialog = xbmcgui.Dialog() - dialog.ok("Quasar", getLocalizedString(30199)) - return False - -def start_quasard(**kwargs): - jsonrpc_failures = 0 - while jsonrpc_enabled() is False: - jsonrpc_failures += 1 - log.warning("Unable to connect to Kodi's JSON-RPC service, retrying...") - if jsonrpc_failures > 1: - time.sleep(5) - if not jsonrpc_enabled(notify=True): - log.error("Unable to reach Kodi's JSON-RPC service, aborting...") - return False - else: - break - time.sleep(3) - - quasar_dir, quasar_binary = get_quasar_binary() - - if quasar_dir is False or quasar_binary is False: - return False - - lockfile = os.path.join(ADDON_PATH, ".lockfile") - if os.path.exists(lockfile): - log.warning("Existing process found from lockfile, killing...") - try: - with open(lockfile) as lf: - pid = int(lf.read().rstrip(" \t\r\n\0")) - os.kill(pid, 9) - except Exception as e: - log.error(repr(e)) - - if PLATFORM["os"] == "windows": - log.warning("Removing library.db.lock file...") - try: - library_lockfile = os.path.join(xbmc.translatePath(ADDON.getAddonInfo("profile")).decode('utf-8'), "library.db.lock") - os.remove(library_lockfile) - except Exception as e: - log.error(repr(e)) - - SW_HIDE = 0 - STARTF_USESHOWWINDOW = 1 - - args = [quasar_binary] - kwargs["cwd"] = quasar_dir - - if PLATFORM["os"] == "windows": - args[0] 
= getWindowsShortPath(quasar_binary) - kwargs["cwd"] = getWindowsShortPath(quasar_dir) - si = subprocess.STARTUPINFO() - si.dwFlags = STARTF_USESHOWWINDOW - si.wShowWindow = SW_HIDE - clear_fd_inherit_flags() - kwargs["startupinfo"] = si - else: - env = os.environ.copy() - env["LD_LIBRARY_PATH"] = "%s:%s" % (quasar_dir, env.get("LD_LIBRARY_PATH", "")) - kwargs["env"] = env - kwargs["close_fds"] = True - - wait_counter = 1 - while xbmc.getCondVisibility('Window.IsVisible(10140)') or xbmc.getCondVisibility('Window.IsActive(10140)'): - if wait_counter == 1: - log.info('Add-on settings currently opened, waiting before starting...') - if wait_counter > 300: - break - time.sleep(1) - wait_counter += 1 - - return subprocess.Popen(args, **kwargs) - -def shutdown(): - try: - urllib2.urlopen(QUASARD_HOST + "/shutdown") - except: - pass - -def wait_for_abortRequested(proc, monitor): - monitor.closing.wait() - log.info("quasard: exiting quasard daemon") - try: - proc.terminate() - except OSError: - pass # Process already exited, nothing to terminate - log.info("quasard: quasard daemon exited") - -def quasard_thread(monitor): - crash_count = 0 - try: - while not xbmc.abortRequested: - log.info("quasard: starting quasard") - proc = start_quasard(stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - if not proc: - break - threading.Thread(target=wait_for_abortRequested, args=[proc, monitor]).start() - - if PLATFORM["os"] == "windows": - while proc.poll() is None: - log.info(proc.stdout.readline()) - else: - # Kodi hangs on some Android (sigh...) systems when doing a blocking - # read. 
We count on the fact that Quasar daemon flushes its log - # output on \n, creating a pretty clean output - import fcntl - import select - fd = proc.stdout.fileno() - fl = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) - while proc.poll() is None: - try: - to_read, _, _ = select.select([proc.stdout], [], []) - for ro in to_read: - line = ro.readline() - if line == "": # write end is closed - break - log.info(line) - except IOError: - time.sleep(1) # nothing to read, sleep - - log.info("quasard: proc.return code: %s" % str(proc.returncode)) - if proc.returncode == 0 or xbmc.abortRequested: - break - - crash_count += 1 - notify(getLocalizedString(30100), time=3000) - xbmc.executebuiltin("Dialog.Close(all, true)") - system_information() - time.sleep(5) - if crash_count >= 3: - notify(getLocalizedString(30110), time=3000) - break - - except Exception as e: - import traceback - map(log.error, traceback.format_exc().split("\n")) - notify("%s: %s" % (getLocalizedString(30226), repr(e).encode('utf-8'))) - raise diff --git a/lib/quasar/navigation.py b/lib/quasar/navigation.py deleted file mode 100644 index 70658b6c..00000000 --- a/lib/quasar/navigation.py +++ /dev/null @@ -1,260 +0,0 @@ -import os -import sys -import socket -import urllib2 -import urlparse -import xbmc -import xbmcgui -import xbmcplugin -from quasar.logger import log -from quasar.config import QUASARD_HOST -from quasar.addon import ADDON, ADDON_ID, ADDON_PATH -from quasar.util import notify, getLocalizedString, getLocalizedLabel, system_information - -try: - import simplejson as json -except ImportError: - import json - - -HANDLE = int(sys.argv[1]) - - -class InfoLabels(dict): - def __init__(self, *args, **kwargs): - self.update(*args, **kwargs) - - def __getitem__(self, key): - return dict.get(self, key.lower(), "") - - def __setitem__(self, key, val): - dict.__setitem__(self, key.lower(), val) - - def update(self, *args, **kwargs): - for k, v in dict(*args, 
**kwargs).iteritems(): - self[k] = v - - -class closing(object): - def __init__(self, thing): - self.thing = thing - - def __enter__(self): - return self.thing - - def __exit__(self, *exc_info): - self.thing.close() - - -class NoRedirectHandler(urllib2.HTTPRedirectHandler): - def http_error_302(self, req, fp, code, msg, headers): - import urllib - infourl = urllib.addinfourl(fp, headers, headers["Location"]) - infourl.status = code - infourl.code = code - return infourl - http_error_300 = http_error_302 - http_error_301 = http_error_302 - http_error_303 = http_error_302 - http_error_307 = http_error_302 - - -def getInfoLabels(): - id_list = [int(s) for s in sys.argv[0].split("/") if s.isdigit()] - tmdb_id = id_list[0] if id_list else None - - if not tmdb_id: - parsed_url = urlparse.urlparse(sys.argv[0] + sys.argv[2]) - query = urlparse.parse_qs(parsed_url.query) - log.debug("Parsed URL: %s, Query: %s", repr(parsed_url), repr(query)) - if 'tmdb' in query and 'show' not in query: - tmdb_id = query['tmdb'][0] - url = "%s/movie/%s/infolabels" % (QUASARD_HOST, tmdb_id) - elif 'show' in query: - tmdb_id = query['show'][0] - if 'season' in query and 'episode' in query: - url = "%s/show/%s/season/%s/episode/%s/infolabels" % (QUASARD_HOST, tmdb_id, query['season'][0], query['episode'][0]) - else: - url = "%s/show/%s/infolabels" % (QUASARD_HOST, tmdb_id) - else: - url = "%s/infolabels" % (QUASARD_HOST) - elif 'movie' in sys.argv[0]: - url = "%s/movie/%s/infolabels" % (QUASARD_HOST, tmdb_id) - elif ('episode' in sys.argv[0] or 'show' in sys.argv[0]) and len(id_list) > 2: - url = "%s/show/%s/season/%s/episode/%s/infolabels" % (QUASARD_HOST, tmdb_id, id_list[1], id_list[2]) - elif 'show' in sys.argv[0] and len(id_list) == 2: - url = "%s/show/%s/season/%s/episode/%s/infolabels" % (QUASARD_HOST, tmdb_id, id_list[1], 1) - else: - url = "%s/infolabels" % (QUASARD_HOST) - - log.debug("Resolving TMDB item by calling %s for %s" % (url, repr(sys.argv))) - - try: - with 
closing(urllib2.urlopen(url)) as response: - resolved = json.loads(response.read()) - if not resolved: - return {} - - if 'info' in resolved and resolved['info']: - resolved.update(resolved['info']) - - if 'art' in resolved and resolved['art']: - resolved['artbanner'] = '' - for k, v in resolved['art'].items(): - resolved['art' + k] = v - - if 'info' in resolved: - del resolved['info'] - if 'art' in resolved: - del resolved['art'] - if 'stream_info' in resolved: - del resolved['stream_info'] - - if 'dbtype' not in resolved: - resolved['dbtype'] = 'video' - if 'mediatype' not in resolved or resolved['mediatype'] == '': - resolved['Mediatype'] = resolved['dbtype'] - - return resolved - except: - log.debug("Could not resolve TMDB item: %s" % tmdb_id) - return {} - - -def _json(url): - with closing(urllib2.urlopen(url)) as response: - if response.code >= 300 and response.code <= 307: - # Pause currently playing Quasar file to avoid doubling requests - if xbmc.Player().isPlaying() and ADDON_ID in xbmc.Player().getPlayingFile(): - xbmc.Player().pause() - _infoLabels = InfoLabels(getInfoLabels()) - - item = xbmcgui.ListItem( - path=response.geturl(), - label=_infoLabels["label"], - label2=_infoLabels["label2"], - thumbnailImage=_infoLabels["thumbnail"]) - - item.setArt({ - "poster": _infoLabels["artposter"], - "banner": _infoLabels["artbanner"], - "fanart": _infoLabels["artfanart"] - }) - - item.setInfo(type='Video', infoLabels=_infoLabels) - xbmcplugin.setResolvedUrl(HANDLE, True, item) - return - - payload = response.read() - - try: - if payload: - return json.loads(payload) - except: - raise Exception(payload) - - -def run(url_suffix=""): - if not os.path.exists(os.path.join(ADDON_PATH, ".firstrun")): - notify(getLocalizedString(30101)) - system_information() - return - - donatePath = os.path.join(ADDON_PATH, ".donate") - if not os.path.exists(donatePath): - with open(donatePath, "w"): - os.utime(donatePath, None) - dialog = xbmcgui.Dialog() - dialog.ok("Quasar", 
getLocalizedString(30141)) - - socket.setdefaulttimeout(int(ADDON.getSetting("buffer_timeout"))) - urllib2.install_opener(urllib2.build_opener(NoRedirectHandler())) - - # Pause currently playing Quasar file to avoid doubling requests - if xbmc.Player().isPlaying() and ADDON_ID in xbmc.Player().getPlayingFile(): - xbmc.Player().pause() - - url = sys.argv[0].replace("plugin://%s" % ADDON_ID, QUASARD_HOST + url_suffix) + sys.argv[2] - log.debug("Requesting %s from %s" % (url, repr(sys.argv))) - - try: - data = _json(url) - except urllib2.URLError as e: - if 'Connection refused' in e.reason: - notify(getLocalizedString(30116), time=7000) - else: - import traceback - map(log.error, traceback.format_exc().split("\n")) - notify(e.reason, time=7000) - return - except Exception as e: - import traceback - map(log.error, traceback.format_exc().split("\n")) - try: - msg = unicode(e) - except: - try: - msg = str(e) - except: - msg = repr(e) - notify(getLocalizedLabel(msg), time=7000) - return - - if not data: - return - - if data["content_type"]: - content_type = data["content_type"] - if data["content_type"].startswith("menus"): - content_type = data["content_type"].split("_")[1] - - xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_UNSORTED) - if content_type != "tvshows": - xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE) - xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_DATE) - xbmcplugin.addSortMethod(HANDLE, xbmcplugin.SORT_METHOD_GENRE) - xbmcplugin.setContent(HANDLE, content_type) - - listitems = range(len(data["items"])) - for i, item in enumerate(data["items"]): - # Translate labels - if item["label"][0:8] == "LOCALIZE": - item["label"] = unicode(getLocalizedLabel(item["label"]), 'utf-8') - if item["label2"][0:8] == "LOCALIZE": - item["label2"] = getLocalizedLabel(item["label2"]) - - listItem = xbmcgui.ListItem(label=item["label"], label2=item["label2"], iconImage=item["icon"], thumbnailImage=item["thumbnail"]) - if 
item.get("info"): - listItem.setInfo("video", item["info"]) - if item.get("stream_info"): - for type_, values in item["stream_info"].items(): - listItem.addStreamInfo(type_, values) - if item.get("art"): - listItem.setArt(item["art"]) - elif ADDON.getSetting('default_fanart') == 'true' and item["label"] != unicode(getLocalizedString(30218), 'utf-8'): - fanart = os.path.join(ADDON_PATH, "fanart.jpg") - listItem.setArt({'fanart': fanart}) - if item.get("context_menu"): - # Translate context menus - for m, menu in enumerate(item["context_menu"]): - if menu[0][0:8] == "LOCALIZE": - menu[0] = getLocalizedLabel(menu[0]) - listItem.addContextMenuItems(item["context_menu"]) - listItem.setProperty("isPlayable", item["is_playable"] and "true" or "false") - if item.get("properties"): - for k, v in item["properties"].items(): - listItem.setProperty(k, v) - listitems[i] = (item["path"], listItem, not item["is_playable"]) - - xbmcplugin.addDirectoryItems(HANDLE, listitems, totalItems=len(listitems)) - - # Set ViewMode - if data["content_type"]: - viewMode = ADDON.getSetting("viewmode_%s" % data["content_type"]) - if viewMode: - try: - xbmc.executebuiltin('Container.SetViewMode(%s)' % viewMode) - except Exception as e: - log.warning("Unable to SetViewMode(%s): %s" % (viewMode, repr(e))) - - xbmcplugin.endOfDirectory(HANDLE, succeeded=True, updateListing=False, cacheToDisc=True) diff --git a/lib/quasar/osarch.py b/lib/quasar/osarch.py deleted file mode 100644 index 5b580c28..00000000 --- a/lib/quasar/osarch.py +++ /dev/null @@ -1,56 +0,0 @@ -import xbmc -import sys -import platform - -def get_platform(): - build = xbmc.getInfoLabel("System.BuildVersion") - kodi_version = int(build.split()[0][:2]) - ret = { - "auto_arch": sys.maxsize > 2 ** 32 and "64-bit" or "32-bit", - "arch": sys.maxsize > 2 ** 32 and "x64" or "x86", - "os": "", - "version": platform.release(), - "kodi": kodi_version, - "build": build - } - if xbmc.getCondVisibility("system.platform.android"): - ret["os"] = 
"android" - if "arm" in platform.machine() or "aarch" in platform.machine(): - ret["arch"] = "arm" - if "64" in platform.machine() and ret["auto_arch"] == "64-bit": - ret["arch"] = "arm" - #ret["arch"] = "x64" #The binary is corrupted in install package - elif xbmc.getCondVisibility("system.platform.linux"): - ret["os"] = "linux" - if "aarch" in platform.machine() or "arm64" in platform.machine(): - if xbmc.getCondVisibility("system.platform.linux.raspberrypi"): - ret["arch"] = "armv7" - elif ret["auto_arch"] == "32-bit": - ret["arch"] = "armv7" - elif ret["auto_arch"] == "64-bit": - ret["arch"] = "arm64" - elif platform.architecture()[0].startswith("32"): - ret["arch"] = "arm" - else: - ret["arch"] = "arm64" - elif "armv7" in platform.machine(): - ret["arch"] = "armv7" - elif "arm" in platform.machine(): - ret["arch"] = "arm" - elif xbmc.getCondVisibility("system.platform.xbox"): - ret["os"] = "windows" - ret["arch"] = "x64" - elif xbmc.getCondVisibility("system.platform.windows"): - ret["os"] = "windows" - if platform.machine().endswith('64'): - ret["arch"] = "x64" - elif xbmc.getCondVisibility("system.platform.osx"): - ret["os"] = "darwin" - ret["arch"] = "x64" - elif xbmc.getCondVisibility("system.platform.ios"): - ret["os"] = "ios" - ret["arch"] = "arm" - return ret - - -PLATFORM = get_platform() diff --git a/lib/quasar/util.py b/lib/quasar/util.py deleted file mode 100644 index c3c3b63b..00000000 --- a/lib/quasar/util.py +++ /dev/null @@ -1,72 +0,0 @@ -import platform -import xbmc -import xbmcgui -from quasar.logger import log -from quasar.osarch import PLATFORM -from quasar.addon import ADDON, ADDON_NAME, ADDON_ICON - - -def notify(message, header=ADDON_NAME, time=5000, image=ADDON_ICON): - sound = ADDON.getSetting('do_not_disturb') == 'false' - dialog = xbmcgui.Dialog() - return dialog.notification(toUtf8(header), toUtf8(message), toUtf8(image), time, sound) - -def getLocalizedLabel(label): - try: - if "LOCALIZE" not in label: - return label - if ";;" not 
in label and label.endswith(']'): - return getLocalizedString(int(label[9:-1])) - else: - parts = label.split(";;") - translation = getLocalizedString(int(parts[0][9:14])) - for i, part in enumerate(parts[1:]): - if part[0:8] == "LOCALIZE": - parts[i + 1] = getLocalizedString(int(part[9:14])) - - return (translation.decode('utf-8', 'replace') % tuple(parts[1:])).encode('utf-8', 'ignore') - except: - return label - -def getLocalizedString(stringId): - try: - return ADDON.getLocalizedString(stringId).encode('utf-8', 'ignore') - except: - return stringId - -def toUtf8(string): - if isinstance(string, unicode): - return string.encode('utf-8', 'ignore') - return string - -def system_information(): - build = xbmc.getInfoLabel("System.BuildVersion") - log.info("System information: %(os)s_%(arch)s %(version)s" % PLATFORM) - log.info("Kodi build version: %s" % build) - log.info("OS type: %s" % platform.system()) - log.info("uname: %s" % repr(platform.uname())) - return PLATFORM - -def getShortPath(path): - if PLATFORM["os"] == "windows": - return getWindowsShortPath(path) - return path - -def getWindowsShortPath(path): - try: - import ctypes - import ctypes.wintypes - - ctypes.windll.kernel32.GetShortPathNameW.argtypes = [ - ctypes.wintypes.LPCWSTR, # lpszLongPath - ctypes.wintypes.LPWSTR, # lpszShortPath - ctypes.wintypes.DWORD # cchBuffer - ] - ctypes.windll.kernel32.GetShortPathNameW.restype = ctypes.wintypes.DWORD - - buf = ctypes.create_unicode_buffer(1024) # adjust buffer size, if necessary - ctypes.windll.kernel32.GetShortPathNameW(path, buf, len(buf)) - - return buf.value - except: - return path diff --git a/lib/rebulk/__init__.py b/lib/rebulk/__init__.py new file mode 100755 index 00000000..93d5e477 --- /dev/null +++ b/lib/rebulk/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Define simple search patterns in bulk to perform advanced matching on any string. 
+""" +# pylint:disable=import-self +from .rebulk import Rebulk +from .rules import Rule, CustomRule, AppendMatch, RemoveMatch, RenameMatch, AppendTags, RemoveTags +from .processors import ConflictSolver, PrivateRemover, POST_PROCESS, PRE_PROCESS +from .pattern import REGEX_AVAILABLE diff --git a/lib/rebulk/__version__.py b/lib/rebulk/__version__.py new file mode 100755 index 00000000..5aa37ed6 --- /dev/null +++ b/lib/rebulk/__version__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Version module +""" +# pragma: no cover +__version__ = '2.0.1.dev0' diff --git a/lib/rebulk/builder.py b/lib/rebulk/builder.py new file mode 100755 index 00000000..c91420aa --- /dev/null +++ b/lib/rebulk/builder.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Base builder class for Rebulk +""" +from abc import ABCMeta, abstractmethod +from copy import deepcopy +from logging import getLogger + +from six import add_metaclass + +from .loose import set_defaults +from .pattern import RePattern, StringPattern, FunctionalPattern + +log = getLogger(__name__).log + + +@add_metaclass(ABCMeta) +class Builder(object): + """ + Base builder class for patterns + """ + + def __init__(self): + self._defaults = {} + self._regex_defaults = {} + self._string_defaults = {} + self._functional_defaults = {} + self._chain_defaults = {} + + def reset(self): + """ + Reset all defaults. + + :return: + """ + self.__init__() + + def defaults(self, **kwargs): + """ + Define default keyword arguments for all patterns + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(kwargs, self._defaults, override=True) + return self + + def regex_defaults(self, **kwargs): + """ + Define default keyword arguments for functional patterns. 
+ :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(kwargs, self._regex_defaults, override=True) + return self + + def string_defaults(self, **kwargs): + """ + Define default keyword arguments for string patterns. + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(kwargs, self._string_defaults, override=True) + return self + + def functional_defaults(self, **kwargs): + """ + Define default keyword arguments for functional patterns. + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(kwargs, self._functional_defaults, override=True) + return self + + def chain_defaults(self, **kwargs): + """ + Define default keyword arguments for patterns chain. + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(kwargs, self._chain_defaults, override=True) + return self + + def build_re(self, *pattern, **kwargs): + """ + Builds a new regular expression pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._regex_defaults, kwargs) + set_defaults(self._defaults, kwargs) + return RePattern(*pattern, **kwargs) + + def build_string(self, *pattern, **kwargs): + """ + Builds a new string pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._string_defaults, kwargs) + set_defaults(self._defaults, kwargs) + return StringPattern(*pattern, **kwargs) + + def build_functional(self, *pattern, **kwargs): + """ + Builds a new functional pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + set_defaults(self._functional_defaults, kwargs) + set_defaults(self._defaults, kwargs) + return FunctionalPattern(*pattern, **kwargs) + + def build_chain(self, **kwargs): + """ + Builds a new patterns chain + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + from .chain import Chain 
+ set_defaults(self._chain_defaults, kwargs) + set_defaults(self._defaults, kwargs) + chain = Chain(self, **kwargs) + chain._defaults = deepcopy(self._defaults) # pylint: disable=protected-access + chain._regex_defaults = deepcopy(self._regex_defaults) # pylint: disable=protected-access + chain._functional_defaults = deepcopy(self._functional_defaults) # pylint: disable=protected-access + chain._string_defaults = deepcopy(self._string_defaults) # pylint: disable=protected-access + chain._chain_defaults = deepcopy(self._chain_defaults) # pylint: disable=protected-access + return chain + + @abstractmethod + def pattern(self, *pattern): + """ + Register a list of Pattern instance + :param pattern: + :return: + """ + pass + + def regex(self, *pattern, **kwargs): + """ + Add re pattern + + :param pattern: + :type pattern: + :return: self + :rtype: Rebulk + """ + return self.pattern(self.build_re(*pattern, **kwargs)) + + def string(self, *pattern, **kwargs): + """ + Add string pattern + + :param pattern: + :type pattern: + :return: self + :rtype: Rebulk + """ + return self.pattern(self.build_string(*pattern, **kwargs)) + + def functional(self, *pattern, **kwargs): + """ + Add functional pattern + + :param pattern: + :type pattern: + :return: self + :rtype: Rebulk + """ + functional = self.build_functional(*pattern, **kwargs) + return self.pattern(functional) + + def chain(self, **kwargs): + """ + Add patterns chain, using configuration of this rebulk + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + chain = self.build_chain(**kwargs) + self.pattern(chain) + return chain diff --git a/lib/rebulk/chain.py b/lib/rebulk/chain.py new file mode 100755 index 00000000..ba31ec9a --- /dev/null +++ b/lib/rebulk/chain.py @@ -0,0 +1,380 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Chain patterns and handle repetiting capture group +""" +# pylint: disable=super-init-not-called +import itertools + +from .builder import Builder 
+from .loose import call +from .match import Match, Matches +from .pattern import Pattern, filter_match_kwargs, BasePattern +from .remodule import re + + +class _InvalidChainException(Exception): + """ + Internal exception raised when a chain is not valid + """ + pass + + +class Chain(Pattern, Builder): + """ + Definition of a pattern chain to search for. + """ + + def __init__(self, parent, chain_breaker=None, **kwargs): + Builder.__init__(self) + call(Pattern.__init__, self, **kwargs) + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + if callable(chain_breaker): + self.chain_breaker = chain_breaker + else: + self.chain_breaker = None + self.parent = parent + self.parts = [] + + def pattern(self, *pattern): + """ + + :param pattern: + :return: + """ + if not pattern: + raise ValueError("One pattern should be given to the chain") + if len(pattern) > 1: + raise ValueError("Only one pattern can be given to the chain") + part = ChainPart(self, pattern[0]) + self.parts.append(part) + return part + + def close(self): + """ + Deeply close the chain + :return: Rebulk instance + """ + parent = self.parent + while isinstance(parent, Chain): + parent = parent.parent + return parent + + def _match(self, pattern, input_string, context=None): + # pylint: disable=too-many-locals,too-many-nested-blocks + chain_matches = [] + chain_input_string = input_string + offset = 0 + while offset < len(input_string): + chain_found = False + current_chain_matches = [] + valid_chain = True + for chain_part in self.parts: + try: + chain_part_matches, raw_chain_part_matches = chain_part.matches(chain_input_string, + context, + with_raw_matches=True) + + chain_found, chain_input_string, offset = \ + self._to_next_chain_part(chain_part, chain_part_matches, raw_chain_part_matches, chain_found, + input_string, chain_input_string, offset, current_chain_matches) + except _InvalidChainException: + valid_chain = False + if current_chain_matches: + offset = 
current_chain_matches[0].raw_end + break + if not chain_found: + break + if current_chain_matches and valid_chain: + match = self._build_chain_match(current_chain_matches, input_string) + chain_matches.append(match) + + return chain_matches + + def _to_next_chain_part(self, chain_part, chain_part_matches, raw_chain_part_matches, chain_found, + input_string, chain_input_string, offset, current_chain_matches): + Chain._fix_matches_offset(chain_part_matches, input_string, offset) + Chain._fix_matches_offset(raw_chain_part_matches, input_string, offset) + + if raw_chain_part_matches: + grouped_matches_dict = self._group_by_match_index(chain_part_matches) + grouped_raw_matches_dict = self._group_by_match_index(raw_chain_part_matches) + + for match_index, grouped_raw_matches in grouped_raw_matches_dict.items(): + chain_found = True + offset = grouped_raw_matches[-1].raw_end + chain_input_string = input_string[offset:] + + if not chain_part.is_hidden: + grouped_matches = grouped_matches_dict.get(match_index, []) + if self._chain_breaker_eval(current_chain_matches + grouped_matches): + current_chain_matches.extend(grouped_matches) + return chain_found, chain_input_string, offset + + def _process_match(self, match, match_index, child=False): + """ + Handle a match + :param match: + :type match: + :param match_index: + :type match_index: + :param child: + :type child: + :return: + :rtype: + """ + # pylint: disable=too-many-locals + ret = super(Chain, self)._process_match(match, match_index, child=child) + if ret: + return True + + if match.children: + last_pattern = match.children[-1].pattern + last_pattern_groups = self._group_by_match_index( + [child_ for child_ in match.children if child_.pattern == last_pattern] + ) + + if last_pattern_groups: + original_children = Matches(match.children) + original_end = match.end + + for index in reversed(list(last_pattern_groups)): + last_matches = last_pattern_groups[index] + for last_match in last_matches: + 
match.children.remove(last_match) + match.end = match.children[-1].end if match.children else match.start + ret = super(Chain, self)._process_match(match, match_index, child=child) + if ret: + return True + + match.children = original_children + match.end = original_end + + return False + + def _build_chain_match(self, current_chain_matches, input_string): + start = None + end = None + for match in current_chain_matches: + if start is None or start > match.start: + start = match.start + if end is None or end < match.end: + end = match.end + match = call(Match, start, end, pattern=self, input_string=input_string, **self._match_kwargs) + for chain_match in current_chain_matches: + if chain_match.children: + for child in chain_match.children: + match.children.append(child) + if chain_match not in match.children: + match.children.append(chain_match) + chain_match.parent = match + return match + + def _chain_breaker_eval(self, matches): + return not self.chain_breaker or not self.chain_breaker(Matches(matches)) + + @staticmethod + def _fix_matches_offset(chain_part_matches, input_string, offset): + for chain_part_match in chain_part_matches: + if chain_part_match.input_string != input_string: + chain_part_match.input_string = input_string + chain_part_match.end += offset + chain_part_match.start += offset + if chain_part_match.children: + Chain._fix_matches_offset(chain_part_match.children, input_string, offset) + + @staticmethod + def _group_by_match_index(matches): + grouped_matches_dict = dict() + for match_index, match in itertools.groupby(matches, lambda m: m.match_index): + grouped_matches_dict[match_index] = list(match) + return grouped_matches_dict + + @property + def match_options(self): + return {} + + @property + def patterns(self): + return [self] + + def __repr__(self): + defined = "" + if self.defined_at: + defined = "@%s" % (self.defined_at,) + return "<%s%s:%s>" % (self.__class__.__name__, defined, self.parts) + + +class ChainPart(BasePattern): + """ + 
Part of a pattern chain. + """ + + def __init__(self, chain, pattern): + self._chain = chain + self.pattern = pattern + self.repeater_start = 1 + self.repeater_end = 1 + self._hidden = False + + @property + def _is_chain_start(self): + return self._chain.parts[0] == self + + def matches(self, input_string, context=None, with_raw_matches=False): + matches, raw_matches = self.pattern.matches(input_string, context=context, with_raw_matches=True) + + matches = self._truncate_repeater(matches, input_string) + raw_matches = self._truncate_repeater(raw_matches, input_string) + + self._validate_repeater(raw_matches) + + if with_raw_matches: + return matches, raw_matches + + return matches + + def _truncate_repeater(self, matches, input_string): + if not matches: + return matches + + if not self._is_chain_start: + separator = input_string[0:matches[0].initiator.raw_start] + if separator: + return [] + + j = 1 + for i in range(0, len(matches) - 1): + separator = input_string[matches[i].initiator.raw_end: + matches[i + 1].initiator.raw_start] + if separator: + break + j += 1 + truncated = matches[:j] + if self.repeater_end is not None: + truncated = [m for m in truncated if m.match_index < self.repeater_end] + return truncated + + def _validate_repeater(self, matches): + max_match_index = -1 + if matches: + max_match_index = max([m.match_index for m in matches]) + if max_match_index + 1 < self.repeater_start: + raise _InvalidChainException + + def chain(self): + """ + Add patterns chain, using configuration from this chain + + :return: + :rtype: + """ + return self._chain.chain() + + def hidden(self, hidden=True): + """ + Hide chain part results from global chain result + + :param hidden: + :type hidden: + :return: + :rtype: + """ + self._hidden = hidden + return self + + @property + def is_hidden(self): + """ + Check if the chain part is hidden + :return: + :rtype: + """ + return self._hidden + + def regex(self, *pattern, **kwargs): + """ + Add re pattern + + :param pattern: 
+ :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + return self._chain.regex(*pattern, **kwargs) + + def functional(self, *pattern, **kwargs): + """ + Add functional pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + return self._chain.functional(*pattern, **kwargs) + + def string(self, *pattern, **kwargs): + """ + Add string pattern + + :param pattern: + :type pattern: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + return self._chain.string(*pattern, **kwargs) + + def close(self): + """ + Close the chain builder to continue registering other patterns + + :return: + :rtype: + """ + return self._chain.close() + + def repeater(self, value): + """ + Define the repeater of the current chain part. + + :param value: + :type value: + :return: + :rtype: + """ + try: + value = int(value) + self.repeater_start = value + self.repeater_end = value + return self + except ValueError: + pass + if value == '+': + self.repeater_start = 1 + self.repeater_end = None + if value == '*': + self.repeater_start = 0 + self.repeater_end = None + elif value == '?': + self.repeater_start = 0 + self.repeater_end = 1 + else: + match = re.match(r'\{\s*(\d*)\s*,?\s*(\d*)\s*\}', value) + if match: + start = match.group(1) + end = match.group(2) + if start or end: + self.repeater_start = int(start) if start else 0 + self.repeater_end = int(end) if end else None + return self + + def __repr__(self): + return "%s({%s,%s})" % (self.pattern, self.repeater_start, self.repeater_end) diff --git a/lib/rebulk/debug.py b/lib/rebulk/debug.py new file mode 100755 index 00000000..2384b26e --- /dev/null +++ b/lib/rebulk/debug.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Debug tools. + +Can be configured by changing values of those variable. + +DEBUG = False +Enable this variable to activate debug features (like defined_at parameters). 
It can slow down Rebulk + +LOG_LEVEL = 0 +Default log level of generated rebulk logs. +""" + +import inspect +import logging +import os +from collections import namedtuple + + +DEBUG = False +LOG_LEVEL = logging.DEBUG + + +class Frame(namedtuple('Frame', ['lineno', 'package', 'name', 'filename'])): + """ + Stack frame representation. + """ + __slots__ = () + + def __repr__(self): + return "%s#L%s" % (os.path.basename(self.filename), self.lineno) + + +def defined_at(): + """ + Get definition location of a pattern or a match (outside of rebulk package). + :return: + :rtype: + """ + if DEBUG: + frame = inspect.currentframe() + while frame: + try: + if frame.f_globals['__package__'] != __package__: + break + except KeyError: # pragma:no cover + # If package is missing, consider we are in. Workaround for python 3.3. + break + frame = frame.f_back + ret = Frame(frame.f_lineno, + frame.f_globals.get('__package__'), + frame.f_globals.get('__name__'), + frame.f_code.co_filename) + del frame + return ret diff --git a/lib/rebulk/formatters.py b/lib/rebulk/formatters.py new file mode 100755 index 00000000..7175a54a --- /dev/null +++ b/lib/rebulk/formatters.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Formatter functions to use in patterns. + +All those function have last argument as match.value (str). +""" + + +def formatters(*chained_formatters): + """ + Chain formatter functions. 
+ :param chained_formatters: + :type chained_formatters: + :return: + :rtype: + """ + + def formatters_chain(input_string): # pylint:disable=missing-docstring + for chained_formatter in chained_formatters: + input_string = chained_formatter(input_string) + return input_string + + return formatters_chain + + +def default_formatter(input_string): + """ + Default formatter + :param input_string: + :return: + """ + return input_string diff --git a/lib/rebulk/introspector.py b/lib/rebulk/introspector.py new file mode 100755 index 00000000..bfefcb75 --- /dev/null +++ b/lib/rebulk/introspector.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Introspect rebulk object to retrieve capabilities. +""" +from abc import ABCMeta, abstractmethod +from collections import defaultdict + +import six +from .pattern import StringPattern, RePattern, FunctionalPattern +from .utils import extend_safe + + +@six.add_metaclass(ABCMeta) +class Description(object): + """ + Abstract class for a description. + """ + @property + @abstractmethod + def properties(self): # pragma: no cover + """ + Properties of described object. + :return: all properties that described object can generate grouped by name. + :rtype: dict + """ + pass + + +class PatternDescription(Description): + """ + Description of a pattern. 
+ """ + def __init__(self, pattern): # pylint:disable=too-many-branches + self.pattern = pattern + self._properties = defaultdict(list) + + if pattern.properties: + for key, values in pattern.properties.items(): + extend_safe(self._properties[key], values) + elif 'value' in pattern.match_options: + self._properties[pattern.name].append(pattern.match_options['value']) + elif isinstance(pattern, StringPattern): + extend_safe(self._properties[pattern.name], pattern.patterns) + elif isinstance(pattern, RePattern): + if pattern.name and pattern.name not in pattern.private_names: + extend_safe(self._properties[pattern.name], [None]) + if not pattern.private_children: + for regex_pattern in pattern.patterns: + for group_name, values in regex_pattern.groupindex.items(): + if group_name not in pattern.private_names: + extend_safe(self._properties[group_name], [None]) + elif isinstance(pattern, FunctionalPattern): + if pattern.name and pattern.name not in pattern.private_names: + extend_safe(self._properties[pattern.name], [None]) + + + @property + def properties(self): + """ + Properties for this rule. + :return: + :rtype: dict + """ + return self._properties + + +class RuleDescription(Description): + """ + Description of a rule. + """ + def __init__(self, rule): + self.rule = rule + + self._properties = defaultdict(list) + + if rule.properties: + for key, values in rule.properties.items(): + extend_safe(self._properties[key], values) + + @property + def properties(self): + """ + Properties for this rule. + :return: + :rtype: dict + """ + return self._properties + + +class Introspection(Description): + """ + Introspection results. 
+ """ + def __init__(self, rebulk, context=None): + self.patterns = [PatternDescription(pattern) for pattern in rebulk.effective_patterns(context) + if not pattern.private and not pattern.marker] + self.rules = [RuleDescription(rule) for rule in rebulk.effective_rules(context)] + + @property + def properties(self): + """ + Properties for Introspection results. + :return: + :rtype: + """ + properties = defaultdict(list) + for pattern in self.patterns: + for key, values in pattern.properties.items(): + extend_safe(properties[key], values) + for rule in self.rules: + for key, values in rule.properties.items(): + extend_safe(properties[key], values) + return properties + + +def introspect(rebulk, context=None): + """ + Introspect a Rebulk instance to grab defined objects and properties that can be generated. + :param rebulk: + :type rebulk: Rebulk + :param context: + :type context: + :return: Introspection instance + :rtype: Introspection + """ + return Introspection(rebulk, context) diff --git a/lib/rebulk/loose.py b/lib/rebulk/loose.py new file mode 100755 index 00000000..5e747a88 --- /dev/null +++ b/lib/rebulk/loose.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Various utilities functions +""" + +import sys + +from inspect import isclass +try: + from inspect import getfullargspec as getargspec + + _fullargspec_supported = True +except ImportError: + _fullargspec_supported = False + from inspect import getargspec + +from .utils import is_iterable + +if sys.version_info < (3, 4, 0): # pragma: no cover + def _constructor(class_): + """ + Retrieves constructor from given class + + :param class_: + :type class_: class + :return: constructor from given class + :rtype: callable + """ + return class_.__init__ +else: # pragma: no cover + def _constructor(class_): + """ + Retrieves constructor from given class + + :param class_: + :type class_: class + :return: constructor from given class + :rtype: callable + """ + return class_ + + +def 
call(function, *args, **kwargs): + """ + Call a function or constructor with given args and kwargs after removing args and kwargs that doesn't match + function or constructor signature + + :param function: Function or constructor to call + :type function: callable + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: sale vakye as default function call + :rtype: object + """ + func = constructor_args if isclass(function) else function_args + call_args, call_kwargs = func(function, *args, **kwargs) + return function(*call_args, **call_kwargs) + + +def function_args(callable_, *args, **kwargs): + """ + Return (args, kwargs) matching the function signature + + :param callable: callable to inspect + :type callable: callable + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + argspec = getargspec(callable_) # pylint:disable=deprecated-method + return argspec_args(argspec, False, *args, **kwargs) + + +def constructor_args(class_, *args, **kwargs): + """ + Return (args, kwargs) matching the function signature + + :param callable: callable to inspect + :type callable: Callable + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + argspec = getargspec(_constructor(class_)) # pylint:disable=deprecated-method + return argspec_args(argspec, True, *args, **kwargs) + + +def argspec_args(argspec, constructor, *args, **kwargs): + """ + Return (args, kwargs) matching the argspec object + + :param argspec: argspec to use + :type argspec: argspec + :param constructor: is it a constructor ? 
+ :type constructor: bool + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + if argspec.varkw: + call_kwarg = kwargs + else: + call_kwarg = dict((k, kwargs[k]) for k in kwargs if k in argspec.args) # Python 2.6 dict comprehension + if argspec.varargs: + call_args = args + else: + call_args = args[:len(argspec.args) - (1 if constructor else 0)] + return call_args, call_kwarg + + +if not _fullargspec_supported: + def argspec_args_legacy(argspec, constructor, *args, **kwargs): + """ + Return (args, kwargs) matching the argspec object + + :param argspec: argspec to use + :type argspec: argspec + :param constructor: is it a constructor ? + :type constructor: bool + :param args: + :type args: + :param kwargs: + :type kwargs: + :return: (args, kwargs) matching the function signature + :rtype: tuple + """ + if argspec.keywords: + call_kwarg = kwargs + else: + call_kwarg = dict((k, kwargs[k]) for k in kwargs if k in argspec.args) # Python 2.6 dict comprehension + if argspec.varargs: + call_args = args + else: + call_args = args[:len(argspec.args) - (1 if constructor else 0)] + return call_args, call_kwarg + + + argspec_args = argspec_args_legacy + + +def ensure_list(param): + """ + Retrieves a list from given parameter. + + :param param: + :type param: + :return: + :rtype: + """ + if not param: + param = [] + elif not is_iterable(param): + param = [param] + return param + + +def ensure_dict(param, default_value, default_key=None): + """ + Retrieves a dict and a default value from given parameter. + + if parameter is not a dict, it will be promoted as the default value. 
+ + :param param: + :type param: + :param default_value: + :type default_value: + :param default_key: + :type default_key: + :return: + :rtype: + """ + if not param: + param = default_value + if not isinstance(param, dict): + if param: + default_value = param + return {default_key: param}, default_value + return param, default_value + + +def filter_index(collection, predicate=None, index=None): + """ + Filter collection with predicate function and index. + + If index is not found, returns None. + :param collection: + :type collection: collection supporting iteration and slicing + :param predicate: function to filter the collection with + :type predicate: function + :param index: position of a single element to retrieve + :type index: int + :return: filtered list, or single element of filtered list if index is defined + :rtype: list or object + """ + if index is None and isinstance(predicate, int): + index = predicate + predicate = None + if predicate: + collection = collection.__class__(filter(predicate, collection)) + if index is not None: + try: + collection = collection[index] + except IndexError: + collection = None + return collection + + +def set_defaults(defaults, kwargs, override=False): + """ + Set defaults from defaults dict to kwargs dict + + :param override: + :type override: + :param defaults: + :type defaults: + :param kwargs: + :type kwargs: + :return: + :rtype: + """ + if 'clear' in defaults.keys() and defaults.pop('clear'): + kwargs.clear() + for key, value in defaults.items(): + if key in kwargs: + if isinstance(value, list) and isinstance(kwargs[key], list): + kwargs[key] = list(value) + kwargs[key] + elif isinstance(value, dict) and isinstance(kwargs[key], dict): + set_defaults(value, kwargs[key]) + if key not in kwargs or override: + kwargs[key] = value diff --git a/lib/rebulk/match.py b/lib/rebulk/match.py new file mode 100755 index 00000000..d8e72df4 --- /dev/null +++ b/lib/rebulk/match.py @@ -0,0 +1,890 @@ +#!/usr/bin/env python +# -*- 
coding: utf-8 -*- +""" +Classes and functions related to matches +""" +import copy +import itertools +from collections import defaultdict +try: + from collections.abc import MutableSequence +except ImportError: + from collections import MutableSequence + +try: + from collections import OrderedDict # pylint:disable=ungrouped-imports +except ImportError: # pragma: no cover + from ordereddict import OrderedDict # pylint:disable=import-error +import six + +from .loose import ensure_list, filter_index +from .utils import is_iterable +from .debug import defined_at + + +class MatchesDict(OrderedDict): + """ + A custom dict with matches property. + """ + + def __init__(self): + super(MatchesDict, self).__init__() + self.matches = defaultdict(list) + self.values_list = defaultdict(list) + + +class _BaseMatches(MutableSequence): + """ + A custom list[Match] that automatically maintains name, tag, start and end lookup structures. + """ + _base = list + _base_add = _base.append + _base_remove = _base.remove + _base_extend = _base.extend + + def __init__(self, matches=None, input_string=None): # pylint: disable=super-init-not-called + self.input_string = input_string + self._max_end = 0 + self._delegate = [] + self.__name_dict = None + self.__tag_dict = None + self.__start_dict = None + self.__end_dict = None + self.__index_dict = None + if matches: + self.extend(matches) + + @property + def _name_dict(self): + if self.__name_dict is None: + self.__name_dict = defaultdict(_BaseMatches._base) + for name, values in itertools.groupby([m for m in self._delegate if m.name], lambda item: item.name): + _BaseMatches._base_extend(self.__name_dict[name], values) + + return self.__name_dict + + @property + def _start_dict(self): + if self.__start_dict is None: + self.__start_dict = defaultdict(_BaseMatches._base) + for start, values in itertools.groupby([m for m in self._delegate], lambda item: item.start): + _BaseMatches._base_extend(self.__start_dict[start], values) + + return 
self.__start_dict + + @property + def _end_dict(self): + if self.__end_dict is None: + self.__end_dict = defaultdict(_BaseMatches._base) + for start, values in itertools.groupby([m for m in self._delegate], lambda item: item.end): + _BaseMatches._base_extend(self.__end_dict[start], values) + + return self.__end_dict + + @property + def _tag_dict(self): + if self.__tag_dict is None: + self.__tag_dict = defaultdict(_BaseMatches._base) + for match in self._delegate: + for tag in match.tags: + _BaseMatches._base_add(self.__tag_dict[tag], match) + + return self.__tag_dict + + @property + def _index_dict(self): + if self.__index_dict is None: + self.__index_dict = defaultdict(_BaseMatches._base) + for match in self._delegate: + for index in range(*match.span): + _BaseMatches._base_add(self.__index_dict[index], match) + + return self.__index_dict + + def _add_match(self, match): + """ + Add a match + :param match: + :type match: Match + """ + if self.__name_dict is not None: + if match.name: + _BaseMatches._base_add(self._name_dict[match.name], (match)) + if self.__tag_dict is not None: + for tag in match.tags: + _BaseMatches._base_add(self._tag_dict[tag], match) + if self.__start_dict is not None: + _BaseMatches._base_add(self._start_dict[match.start], match) + if self.__end_dict is not None: + _BaseMatches._base_add(self._end_dict[match.end], match) + if self.__index_dict is not None: + for index in range(*match.span): + _BaseMatches._base_add(self._index_dict[index], match) + if match.end > self._max_end: + self._max_end = match.end + + def _remove_match(self, match): + """ + Remove a match + :param match: + :type match: Match + """ + if self.__name_dict is not None: + if match.name: + _BaseMatches._base_remove(self._name_dict[match.name], match) + if self.__tag_dict is not None: + for tag in match.tags: + _BaseMatches._base_remove(self._tag_dict[tag], match) + if self.__start_dict is not None: + _BaseMatches._base_remove(self._start_dict[match.start], match) + if 
self.__end_dict is not None: + _BaseMatches._base_remove(self._end_dict[match.end], match) + if self.__index_dict is not None: + for index in range(*match.span): + _BaseMatches._base_remove(self._index_dict[index], match) + if match.end >= self._max_end and not self._end_dict[match.end]: + self._max_end = max(self._end_dict.keys()) + + def previous(self, match, predicate=None, index=None): + """ + Retrieves the nearest previous matches. + :param match: + :type match: + :param predicate: + :type predicate: + :param index: + :type index: int + :return: + :rtype: + """ + current = match.start + while current > -1: + previous_matches = self.ending(current) + if previous_matches: + return filter_index(previous_matches, predicate, index) + current -= 1 + return filter_index(_BaseMatches._base(), predicate, index) + + def next(self, match, predicate=None, index=None): + """ + Retrieves the nearest next matches. + :param match: + :type match: + :param predicate: + :type predicate: + :param index: + :type index: int + :return: + :rtype: + """ + current = match.start + 1 + while current <= self._max_end: + next_matches = self.starting(current) + if next_matches: + return filter_index(next_matches, predicate, index) + current += 1 + return filter_index(_BaseMatches._base(), predicate, index) + + def named(self, name, predicate=None, index=None): + """ + Retrieves a set of Match objects that have the given name. + :param name: + :type name: str + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._name_dict[name]), predicate, index) + + def tagged(self, tag, predicate=None, index=None): + """ + Retrieves a set of Match objects that have the given tag defined. 
+ :param tag: + :type tag: str + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._tag_dict[tag]), predicate, index) + + def starting(self, start, predicate=None, index=None): + """ + Retrieves a set of Match objects that starts at given index. + :param start: the starting index + :type start: int + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._start_dict[start]), predicate, index) + + def ending(self, end, predicate=None, index=None): + """ + Retrieves a set of Match objects that ends at given index. + :param end: the ending index + :type end: int + :param predicate: + :type predicate: + :return: set of matches + :rtype: set[Match] + """ + return filter_index(_BaseMatches._base(self._end_dict[end]), predicate, index) + + def range(self, start=0, end=None, predicate=None, index=None): + """ + Retrieves a set of Match objects that are available in given range, sorted from start to end. + :param start: the starting index + :type start: int + :param end: the ending index + :type end: int + :param predicate: + :type predicate: + :param index: + :type index: int + :return: set of matches + :rtype: set[Match] + """ + if end is None: + end = self.max_end + else: + end = min(self.max_end, end) + ret = _BaseMatches._base() + for match in sorted(self): + if match.start < end and match.end > start: + ret.append(match) + return filter_index(ret, predicate, index) + + def chain_before(self, position, seps, start=0, predicate=None, index=None): + """ + Retrieves a list of chained matches, before position, matching predicate and separated by characters from seps + only. 
+ :param position: + :type position: + :param seps: + :type seps: + :param start: + :type start: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + if hasattr(position, 'start'): + position = position.start + + chain = _BaseMatches._base() + position = min(self.max_end, position) + + for i in reversed(range(start, position)): + index_matches = self.at_index(i) + filtered_matches = [index_match for index_match in index_matches if not predicate or predicate(index_match)] + if filtered_matches: + for chain_match in filtered_matches: + if chain_match not in chain: + chain.append(chain_match) + elif self.input_string[i] not in seps: + break + + return filter_index(chain, predicate, index) + + def chain_after(self, position, seps, end=None, predicate=None, index=None): + """ + Retrieves a list of chained matches, after position, matching predicate and separated by characters from seps + only. + :param position: + :type position: + :param seps: + :type seps: + :param end: + :type end: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + if hasattr(position, 'end'): + position = position.end + chain = _BaseMatches._base() + + if end is None: + end = self.max_end + else: + end = min(self.max_end, end) + + for i in range(position, end): + index_matches = self.at_index(i) + filtered_matches = [index_match for index_match in index_matches if not predicate or predicate(index_match)] + if filtered_matches: + for chain_match in filtered_matches: + if chain_match not in chain: + chain.append(chain_match) + elif self.input_string[i] not in seps: + break + + return filter_index(chain, predicate, index) + + @property + def max_end(self): + """ + Retrieves the maximum index. + :return: + """ + return max(len(self.input_string), self._max_end) if self.input_string else self._max_end + + def _hole_start(self, position, ignore=None): + """ + Retrieves the start of hole index from position. 
+ :param position: + :type position: + :param ignore: + :type ignore: + :return: + :rtype: + """ + for lindex in reversed(range(0, position)): + for starting in self.starting(lindex): + if not ignore or not ignore(starting): + return lindex + return 0 + + def _hole_end(self, position, ignore=None): + """ + Retrieves the end of hole index from position. + :param position: + :type position: + :param ignore: + :type ignore: + :return: + :rtype: + """ + for rindex in range(position, self.max_end): + for starting in self.starting(rindex): + if not ignore or not ignore(starting): + return rindex + return self.max_end + + def holes(self, start=0, end=None, formatter=None, ignore=None, seps=None, predicate=None, + index=None): # pylint: disable=too-many-branches,too-many-locals + """ + Retrieves a set of Match objects that are not defined in given range. + :param start: + :type start: + :param end: + :type end: + :param formatter: + :type formatter: + :param ignore: + :type ignore: + :param seps: + :type seps: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + assert self.input_string if seps else True, "input_string must be defined when using seps parameter" + if end is None: + end = self.max_end + else: + end = min(self.max_end, end) + ret = _BaseMatches._base() + hole = False + rindex = start + + loop_start = self._hole_start(start, ignore) + + for rindex in range(loop_start, end): + current = [] + for at_index in self.at_index(rindex): + if not ignore or not ignore(at_index): + current.append(at_index) + + if seps and hole and self.input_string and self.input_string[rindex] in seps: + hole = False + ret[-1].end = rindex + else: + if not current and not hole: + # Open a new hole match + hole = True + ret.append(Match(max(rindex, start), None, input_string=self.input_string, formatter=formatter)) + elif current and hole: + # Close current hole match + hole = False + ret[-1].end = rindex + + if ret and hole: + # go the the 
next starting element ... + ret[-1].end = min(self._hole_end(rindex, ignore), end) + return filter_index(ret, predicate, index) + + def conflicting(self, match, predicate=None, index=None): + """ + Retrieves a list of ``Match`` objects that conflicts with given match. + :param match: + :type match: + :param predicate: + :type predicate: + :param index: + :type index: + :return: + :rtype: + """ + ret = _BaseMatches._base() + + for i in range(*match.span): + for at_match in self.at_index(i): + if at_match not in ret: + ret.append(at_match) + + ret.remove(match) + + return filter_index(ret, predicate, index) + + def at_match(self, match, predicate=None, index=None): + """ + Retrieves a list of matches from given match. + """ + return self.at_span(match.span, predicate, index) + + def at_span(self, span, predicate=None, index=None): + """ + Retrieves a list of matches from given (start, end) tuple. + """ + starting = self._index_dict[span[0]] + ending = self._index_dict[span[1] - 1] + + merged = list(starting) + for marker in ending: + if marker not in merged: + merged.append(marker) + + return filter_index(merged, predicate, index) + + def at_index(self, pos, predicate=None, index=None): + """ + Retrieves a list of matches from given position + """ + return filter_index(self._index_dict[pos], predicate, index) + + @property + def names(self): + """ + Retrieve all names. + :return: + """ + return self._name_dict.keys() + + @property + def tags(self): + """ + Retrieve all tags. + :return: + """ + return self._tag_dict.keys() + + def to_dict(self, details=False, first_value=False, enforce_list=False): + """ + Converts matches to a dict object. + :param details if True, values will be complete Match object, else it will be only string Match.value property + :type details: bool + :param first_value if True, only the first value will be kept. Else, multiple values will be set as a list in + the dict. 
+ :type first_value: bool + :param enforce_list: if True, value is wrapped in a list even when a single value is found. Else, list values + are available under `values_list` property of the returned dict object. + :type enforce_list: bool + :return: + :rtype: dict + """ + ret = MatchesDict() + for match in sorted(self): + value = match if details else match.value + ret.matches[match.name].append(match) + if not enforce_list and value not in ret.values_list[match.name]: + ret.values_list[match.name].append(value) + if match.name in ret.keys(): + if not first_value: + if not isinstance(ret[match.name], list): + if ret[match.name] == value: + continue + ret[match.name] = [ret[match.name]] + else: + if value in ret[match.name]: + continue + ret[match.name].append(value) + else: + if enforce_list and not isinstance(value, list): + ret[match.name] = [value] + else: + ret[match.name] = value + return ret + + if six.PY2: # pragma: no cover + def clear(self): + """ + Python 3 backport + """ + del self[:] + + def __len__(self): + return len(self._delegate) + + def __getitem__(self, index): + ret = self._delegate[index] + if isinstance(ret, list): + return Matches(ret) + return ret + + def __setitem__(self, index, match): + self._delegate[index] = match + if isinstance(index, slice): + for match_item in match: + self._add_match(match_item) + return + self._add_match(match) + + def __delitem__(self, index): + match = self._delegate[index] + del self._delegate[index] + if isinstance(match, list): + # if index is a slice, we has a match list + for match_item in match: + self._remove_match(match_item) + else: + self._remove_match(match) + + def __repr__(self): + return self._delegate.__repr__() + + def insert(self, index, value): + self._delegate.insert(index, value) + self._add_match(value) + + +class Matches(_BaseMatches): + """ + A custom list[Match] contains matches list. 
+ """ + + def __init__(self, matches=None, input_string=None): + self.markers = Markers(input_string=input_string) + super(Matches, self).__init__(matches=matches, input_string=input_string) + + def _add_match(self, match): + assert not match.marker, "A marker match should not be added to <Matches> object" + super(Matches, self)._add_match(match) + + +class Markers(_BaseMatches): + """ + A custom list[Match] containing markers list. + """ + + def __init__(self, matches=None, input_string=None): + super(Markers, self).__init__(matches=None, input_string=input_string) + + def _add_match(self, match): + assert match.marker, "A non-marker match should not be added to <Markers> object" + super(Markers, self)._add_match(match) + + +class Match(object): + """ + Object storing values related to a single match + """ + + def __init__(self, start, end, value=None, name=None, tags=None, marker=None, parent=None, private=None, + pattern=None, input_string=None, formatter=None, conflict_solver=None, **kwargs): + # pylint: disable=unused-argument + self.start = start + self.end = end + self.name = name + self._value = value + self.tags = ensure_list(tags) + self.marker = marker + self.parent = parent + self.input_string = input_string + self.formatter = formatter + self.pattern = pattern + self.private = private + self.conflict_solver = conflict_solver + self._children = None + self._raw_start = None + self._raw_end = None + self.defined_at = pattern.defined_at if pattern else defined_at() + + @property + def span(self): + """ + 2-tuple with start and end indices of the match + """ + return self.start, self.end + + @property + def children(self): + """ + Children matches. + """ + if self._children is None: + self._children = Matches(None, self.input_string) + return self._children + + @children.setter + def children(self, value): + self._children = value + + @property + def value(self): + """ + Get the value of the match, using formatter if defined. 
+ :return: + :rtype: + """ + if self._value: + return self._value + if self.formatter: + return self.formatter(self.raw) + return self.raw + + @value.setter + def value(self, value): + """ + Set the value (hardcode) + :param value: + :type value: + :return: + :rtype: + """ + self._value = value # pylint: disable=attribute-defined-outside-init + + @property + def names(self): + """ + Get all names of children + :return: + :rtype: + """ + if not self.children: + return set([self.name]) + ret = set() + for child in self.children: + for name in child.names: + ret.add(name) + return ret + + @property + def raw_start(self): + """ + start index of raw value + :return: + :rtype: + """ + if self._raw_start is None: + return self.start + return self._raw_start + + @raw_start.setter + def raw_start(self, value): + """ + Set start index of raw value + :return: + :rtype: + """ + self._raw_start = value + + @property + def raw_end(self): + """ + end index of raw value + :return: + :rtype: + """ + if self._raw_end is None: + return self.end + return self._raw_end + + @raw_end.setter + def raw_end(self, value): + """ + Set end index of raw value + :return: + :rtype: + """ + self._raw_end = value + + @property + def raw(self): + """ + Get the raw value of the match, without using hardcoded value nor formatter. 
+ :return: + :rtype: + """ + if self.input_string: + return self.input_string[self.raw_start:self.raw_end] + return None + + @property + def initiator(self): + """ + Retrieve the initiator parent of a match + :param match: + :type match: + :return: + :rtype: + """ + match = self + while match.parent: + match = match.parent + return match + + def crop(self, crops, predicate=None, index=None): + """ + crop the match with given Match objects or spans tuples + :param crops: + :type crops: list or object + :return: a list of Match objects + :rtype: list[Match] + """ + if not is_iterable(crops) or len(crops) == 2 and isinstance(crops[0], int): + crops = [crops] + initial = copy.deepcopy(self) + ret = [initial] + for crop in crops: + if hasattr(crop, 'span'): + start, end = crop.span + else: + start, end = crop + for current in list(ret): + if start <= current.start and end >= current.end: + # self is included in crop, remove current ... + ret.remove(current) + elif start >= current.start and end <= current.end: + # crop is included in self, split current ... + right = copy.deepcopy(current) + current.end = start + if not current: + ret.remove(current) + right.start = end + if right: + ret.append(right) + elif current.end >= end > current.start: + current.start = end + elif current.start <= start < current.end: + current.end = start + return filter_index(ret, predicate, index) + + def split(self, seps, predicate=None, index=None): + """ + Split this match in multiple matches using given separators. 
+ :param seps: + :type seps: string containing separator characters + :return: list of new Match objects + :rtype: list + """ + split_match = copy.deepcopy(self) + current_match = split_match + ret = [] + + for i in range(0, len(self.raw)): + if self.raw[i] in seps: + if not split_match: + split_match = copy.deepcopy(current_match) + current_match.end = self.start + i + + else: + if split_match: + split_match.start = self.start + i + current_match = split_match + ret.append(split_match) + split_match = None + + return filter_index(ret, predicate, index) + + def tagged(self, *tags): + """ + Check if this match has at least one of the provided tags + + :param tags: + :return: True if at least one tag is defined, False otherwise. + """ + return any(tag in self.tags for tag in tags) + + def named(self, *names): + """ + Check if one of the children match has one of the provided name + + :param names: + :return: True if at least one child is named with a given name is defined, False otherwise. 
+ """ + return any(name in self.names for name in names) + + def __len__(self): + return self.end - self.start + + def __hash__(self): + return hash(Match) + hash(self.start) + hash(self.end) + hash(self.value) + + def __eq__(self, other): + if isinstance(other, Match): + return self.span == other.span and self.value == other.value and self.name == other.name and \ + self.parent == other.parent + return NotImplemented + + def __ne__(self, other): + if isinstance(other, Match): + return self.span != other.span or self.value != other.value or self.name != other.name or \ + self.parent != other.parent + return NotImplemented + + def __lt__(self, other): + if isinstance(other, Match): + return self.span < other.span + return NotImplemented + + def __gt__(self, other): + if isinstance(other, Match): + return self.span > other.span + return NotImplemented + + def __le__(self, other): + if isinstance(other, Match): + return self.span <= other.span + return NotImplemented + + def __ge__(self, other): + if isinstance(other, Match): + return self.span >= other.span + return NotImplemented + + def __repr__(self): + flags = "" + name = "" + tags = "" + defined = "" + initiator = "" + if self.initiator.value != self.value: + initiator = "+initiator=" + self.initiator.value + if self.private: + flags += '+private' + if self.name: + name = "+name=%s" % (self.name,) + if self.tags: + tags = "+tags=%s" % (self.tags,) + if self.defined_at: + defined += "@%s" % (self.defined_at,) + return "<%s:%s%s%s%s%s%s>" % (self.value, self.span, flags, name, tags, initiator, defined) diff --git a/lib/rebulk/pattern.py b/lib/rebulk/pattern.py new file mode 100755 index 00000000..beb8b273 --- /dev/null +++ b/lib/rebulk/pattern.py @@ -0,0 +1,559 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Abstract pattern class definition along with various implementations (regexp, string, functional) +""" +# pylint: disable=super-init-not-called,wrong-import-position + +from abc import ABCMeta, 
abstractmethod, abstractproperty + +import six + +from . import debug +from .formatters import default_formatter +from .loose import call, ensure_list, ensure_dict +from .match import Match +from .remodule import re, REGEX_AVAILABLE +from .utils import find_all, is_iterable, get_first_defined +from .validators import allways_true + + +@six.add_metaclass(ABCMeta) +class BasePattern(object): + """ + Base class for Pattern like objects + """ + + @abstractmethod + def matches(self, input_string, context=None, with_raw_matches=False): + """ + Computes all matches for a given input + + :param input_string: the string to parse + :type input_string: str + :param context: the context + :type context: dict + :param with_raw_matches: should return details + :type with_raw_matches: dict + :return: matches based on input_string for this pattern + :rtype: iterator[Match] + """ + pass + + +@six.add_metaclass(ABCMeta) +class Pattern(BasePattern): + """ + Definition of a particular pattern to search for. + """ + + def __init__(self, name=None, tags=None, formatter=None, value=None, validator=None, children=False, every=False, + private_parent=False, private_children=False, private=False, private_names=None, ignore_names=None, + marker=False, format_all=False, validate_all=False, disabled=lambda context: False, log_level=None, + properties=None, post_processor=None, pre_match_processor=None, post_match_processor=None, **kwargs): + """ + :param name: Name of this pattern + :type name: str + :param tags: List of tags related to this pattern + :type tags: list[str] + :param formatter: dict (name, func) of formatter to use with this pattern. name is the match name to support, + and func a function(input_string) that returns the formatted string. A single formatter function can also be + passed as a shortcut for {None: formatter}. The returned formatted string with be set in Match.value property. 
+ :type formatter: dict[str, func] || func + :param value: dict (name, value) of value to use with this pattern. name is the match name to support, + and value an object for the match value. A single object value can also be + passed as a shortcut for {None: value}. The value with be set in Match.value property. + :type value: dict[str, object] || object + :param validator: dict (name, func) of validator to use with this pattern. name is the match name to support, + and func a function(match) that returns the a boolean. A single validator function can also be + passed as a shortcut for {None: validator}. If return value is False, match will be ignored. + :param children: generates children instead of parent + :type children: bool + :param every: generates both parent and children. + :type every: bool + :param private: flag this pattern as beeing private. + :type private: bool + :param private_parent: force return of parent and flag parent matches as private. + :type private_parent: bool + :param private_children: force return of children and flag children matches as private. + :type private_children: bool + :param private_names: force return of named matches as private. + :type private_names: bool + :param ignore_names: drop some named matches after validation. + :type ignore_names: bool + :param marker: flag this pattern as beeing a marker. + :type private: bool + :param format_all if True, pattern will format every match in the hierarchy (even match not yield). + :type format_all: bool + :param validate_all if True, pattern will validate every match in the hierarchy (even match not yield). + :type validate_all: bool + :param disabled: if True, this pattern is disabled. Can also be a function(context). 
+ :type disabled: bool|function + :param log_lvl: Log level associated to this pattern + :type log_lvl: int + :param post_processor: Post processing function + :type post_processor: func + :param pre_match_processor: Pre match processing function + :type pre_match_processor: func + :param post_match_processor: Post match processing function + :type post_match_processor: func + """ + # pylint:disable=too-many-locals,unused-argument + self.name = name + self.tags = ensure_list(tags) + self.formatters, self._default_formatter = ensure_dict(formatter, default_formatter) + self.values, self._default_value = ensure_dict(value, None) + self.validators, self._default_validator = ensure_dict(validator, allways_true) + self.every = every + self.children = children + self.private = private + self.private_names = private_names if private_names else [] + self.ignore_names = ignore_names if ignore_names else [] + self.private_parent = private_parent + self.private_children = private_children + self.marker = marker + self.format_all = format_all + self.validate_all = validate_all + if not callable(disabled): + self.disabled = lambda context: disabled + else: + self.disabled = disabled + self._log_level = log_level + self._properties = properties + self.defined_at = debug.defined_at() + if not callable(post_processor): + self.post_processor = None + else: + self.post_processor = post_processor + if not callable(pre_match_processor): + self.pre_match_processor = None + else: + self.pre_match_processor = pre_match_processor + if not callable(post_match_processor): + self.post_match_processor = None + else: + self.post_match_processor = post_match_processor + + @property + def log_level(self): + """ + Log level for this pattern. 
+ :return: + :rtype: + """ + return self._log_level if self._log_level is not None else debug.LOG_LEVEL + + def matches(self, input_string, context=None, with_raw_matches=False): + """ + Computes all matches for a given input + + :param input_string: the string to parse + :type input_string: str + :param context: the context + :type context: dict + :param with_raw_matches: should return details + :type with_raw_matches: dict + :return: matches based on input_string for this pattern + :rtype: iterator[Match] + """ + # pylint: disable=too-many-branches + + matches = [] + raw_matches = [] + + for pattern in self.patterns: + match_index = 0 + for match in self._match(pattern, input_string, context): + raw_matches.append(match) + matches.extend(self._process_matches(match, match_index)) + match_index += 1 + + matches = self._post_process_matches(matches) + + if with_raw_matches: + return matches, raw_matches + return matches + + @property + def _should_include_children(self): + """ + Check if children matches from this pattern should be included in matches results. + :param match: + :type match: + :return: + :rtype: + """ + return self.children or self.every + + @property + def _should_include_parent(self): + """ + Check is a match from this pattern should be included in matches results. + :param match: + :type match: + :return: + :rtype: + """ + return not self.children or self.every + + @staticmethod + def _match_config_property_keys(match, child=False): + if match.name: + yield match.name + if child: + yield '__children__' + else: + yield '__parent__' + yield None + + @staticmethod + def _process_match_index(match, match_index): + """ + Process match index from this pattern process state. + + :param match: + :return: + """ + match.match_index = match_index + + def _process_match_private(self, match, child=False): + """ + Process match privacy from this pattern configuration. 
+ + :param match: + :param child: + :return: + """ + + if match.name and match.name in self.private_names or \ + not child and self.private_parent or \ + child and self.private_children: + match.private = True + + def _process_match_value(self, match, child=False): + """ + Process match value from this pattern configuration. + :param match: + :return: + """ + keys = self._match_config_property_keys(match, child=child) + pattern_value = get_first_defined(self.values, keys, self._default_value) + if pattern_value: + match.value = pattern_value + + def _process_match_formatter(self, match, child=False): + """ + Process match formatter from this pattern configuration. + + :param match: + :return: + """ + included = self._should_include_children if child else self._should_include_parent + if included or self.format_all: + keys = self._match_config_property_keys(match, child=child) + match.formatter = get_first_defined(self.formatters, keys, self._default_formatter) + + def _process_match_validator(self, match, child=False): + """ + Process match validation from this pattern configuration. + + :param match: + :return: True if match is validated by the configured validator, False otherwise. + """ + included = self._should_include_children if child else self._should_include_parent + if included or self.validate_all: + keys = self._match_config_property_keys(match, child=child) + validator = get_first_defined(self.validators, keys, self._default_validator) + if validator and not validator(match): + return False + return True + + def _process_match(self, match, match_index, child=False): + """ + Process match from this pattern by setting all properties from defined configuration + (index, private, value, formatter, validator, ...). + + :param match: + :type match: + :return: True if match is validated by the configured validator, False otherwise. 
+ :rtype: + """ + self._process_match_index(match, match_index) + self._process_match_private(match, child) + self._process_match_value(match, child) + self._process_match_formatter(match, child) + return self._process_match_validator(match, child) + + @staticmethod + def _process_match_processor(match, processor): + if processor: + ret = processor(match) + if ret is not None: + return ret + return match + + def _process_matches(self, match, match_index): + """ + Process and generate all matches for the given unprocessed match. + :param match: + :param match_index: + :return: Process and dispatched matches. + """ + match = self._process_match_processor(match, self.pre_match_processor) + if not match: + return + + if not self._process_match(match, match_index): + return + + for child in match.children: + if not self._process_match(child, match_index, child=True): + return + + match = self._process_match_processor(match, self.post_match_processor) + if not match: + return + + if (self._should_include_parent or self.private_parent) and match.name not in self.ignore_names: + yield match + if self._should_include_children or self.private_children: + children = [x for x in match.children if x.name not in self.ignore_names] + for child in children: + yield child + + def _post_process_matches(self, matches): + """ + Post process matches with user defined function + :param matches: + :type matches: + :return: + :rtype: + """ + if self.post_processor: + return self.post_processor(matches, self) + return matches + + @abstractproperty + def patterns(self): # pragma: no cover + """ + List of base patterns defined + + :return: A list of base patterns + :rtype: list + """ + pass + + @property + def properties(self): + """ + Properties names and values that can ben retrieved by this pattern. 
+ :return: + :rtype: + """ + if self._properties: + return self._properties + return {} + + @abstractproperty + def match_options(self): # pragma: no cover + """ + dict of default options for generated Match objects + + :return: **options to pass to Match constructor + :rtype: dict + """ + pass + + @abstractmethod + def _match(self, pattern, input_string, context=None): # pragma: no cover + """ + Computes all unprocess matches for a given pattern and input. + + :param pattern: the pattern to use + :param input_string: the string to parse + :type input_string: str + :param context: the context + :type context: dict + :return: matches based on input_string for this pattern + :rtype: iterator[Match] + """ + pass + + def __repr__(self): + defined = "" + if self.defined_at: + defined = "@%s" % (self.defined_at,) + return "<%s%s:%s>" % (self.__class__.__name__, defined, self.__repr__patterns__) + + @property + def __repr__patterns__(self): + return self.patterns + + +class StringPattern(Pattern): + """ + Definition of one or many strings to search for. + """ + + def __init__(self, *patterns, **kwargs): + super(StringPattern, self).__init__(**kwargs) + self._patterns = patterns + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + + @property + def patterns(self): + return self._patterns + + @property + def match_options(self): + return self._match_kwargs + + def _match(self, pattern, input_string, context=None): + for index in find_all(input_string, pattern, **self._kwargs): + match = Match(index, index + len(pattern), pattern=self, input_string=input_string, **self._match_kwargs) + if match: + yield match + + +class RePattern(Pattern): + """ + Definition of one or many regular expression pattern to search for. 
+ """ + + def __init__(self, *patterns, **kwargs): + super(RePattern, self).__init__(**kwargs) + self.repeated_captures = REGEX_AVAILABLE + if 'repeated_captures' in kwargs: + self.repeated_captures = kwargs.get('repeated_captures') + if self.repeated_captures and not REGEX_AVAILABLE: # pragma: no cover + raise NotImplementedError("repeated_capture is available only with regex module.") + self.abbreviations = kwargs.get('abbreviations', []) + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + self._children_match_kwargs = filter_match_kwargs(kwargs, children=True) + self._patterns = [] + for pattern in patterns: + if isinstance(pattern, six.string_types): + if self.abbreviations and pattern: + for key, replacement in self.abbreviations: + pattern = pattern.replace(key, replacement) + pattern = call(re.compile, pattern, **self._kwargs) + elif isinstance(pattern, dict): + if self.abbreviations and 'pattern' in pattern: + for key, replacement in self.abbreviations: + pattern['pattern'] = pattern['pattern'].replace(key, replacement) + pattern = re.compile(**pattern) + elif hasattr(pattern, '__iter__'): + pattern = re.compile(*pattern) + self._patterns.append(pattern) + + @property + def patterns(self): + return self._patterns + + @property + def __repr__patterns__(self): + return [pattern.pattern for pattern in self.patterns] + + @property + def match_options(self): + return self._match_kwargs + + def _match(self, pattern, input_string, context=None): + names = dict((v, k) for k, v in pattern.groupindex.items()) + for match_object in pattern.finditer(input_string): + start = match_object.start() + end = match_object.end() + main_match = Match(start, end, pattern=self, input_string=input_string, **self._match_kwargs) + + if pattern.groups: + for i in range(1, pattern.groups + 1): + name = names.get(i, main_match.name) + if self.repeated_captures: + for start, end in match_object.spans(i): + child_match = Match(start, end, name=name, 
parent=main_match, pattern=self, + input_string=input_string, **self._children_match_kwargs) + if child_match: + main_match.children.append(child_match) + else: + start, end = match_object.span(i) + if start > -1 and end > -1: + child_match = Match(start, end, name=name, parent=main_match, pattern=self, + input_string=input_string, **self._children_match_kwargs) + if child_match: + main_match.children.append(child_match) + + if main_match: + yield main_match + + +class FunctionalPattern(Pattern): + """ + Definition of one or many functional pattern to search for. + """ + + def __init__(self, *patterns, **kwargs): + super(FunctionalPattern, self).__init__(**kwargs) + self._patterns = patterns + self._kwargs = kwargs + self._match_kwargs = filter_match_kwargs(kwargs) + + @property + def patterns(self): + return self._patterns + + @property + def match_options(self): + return self._match_kwargs + + def _match(self, pattern, input_string, context=None): + ret = call(pattern, input_string, context, **self._kwargs) + if ret: + if not is_iterable(ret) or isinstance(ret, dict) \ + or (is_iterable(ret) and hasattr(ret, '__getitem__') and isinstance(ret[0], int)): + args_iterable = [ret] + else: + args_iterable = ret + for args in args_iterable: + if isinstance(args, dict): + options = args + options.pop('input_string', None) + options.pop('pattern', None) + if self._match_kwargs: + options = self._match_kwargs.copy() + options.update(args) + match = Match(pattern=self, input_string=input_string, **options) + if match: + yield match + else: + kwargs = self._match_kwargs + if isinstance(args[-1], dict): + kwargs = dict(kwargs) + kwargs.update(args[-1]) + args = args[:-1] + match = Match(*args, pattern=self, input_string=input_string, **kwargs) + if match: + yield match + + +def filter_match_kwargs(kwargs, children=False): + """ + Filters out kwargs for Match construction + + :param kwargs: + :type kwargs: dict + :param children: + :type children: Flag to filter children 
matches + :return: A filtered dict + :rtype: dict + """ + kwargs = kwargs.copy() + for key in ('pattern', 'start', 'end', 'parent', 'formatter', 'value'): + if key in kwargs: + del kwargs[key] + if children: + for key in ('name',): + if key in kwargs: + del kwargs[key] + return kwargs diff --git a/lib/rebulk/processors.py b/lib/rebulk/processors.py new file mode 100755 index 00000000..6a4f0bab --- /dev/null +++ b/lib/rebulk/processors.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Processor functions +""" +from logging import getLogger + +from .utils import IdentitySet + +from .rules import Rule, RemoveMatch + +log = getLogger(__name__).log + +DEFAULT = '__default__' + +POST_PROCESS = -2048 +PRE_PROCESS = 2048 + + +def _default_conflict_solver(match, conflicting_match): + """ + Default conflict solver for matches, shorter matches if they conflicts with longer ones + + :param conflicting_match: + :type conflicting_match: + :param match: + :type match: + :return: + :rtype: + """ + if len(conflicting_match.initiator) < len(match.initiator): + return conflicting_match + if len(match.initiator) < len(conflicting_match.initiator): + return match + return None + + +class ConflictSolver(Rule): + """ + Remove conflicting matches. + """ + priority = PRE_PROCESS + + consequence = RemoveMatch + + @property + def default_conflict_solver(self): # pylint:disable=no-self-use + """ + Default conflict solver to use. 
+ """ + return _default_conflict_solver + + def when(self, matches, context): + # pylint:disable=too-many-nested-blocks + to_remove_matches = IdentitySet() + + public_matches = [match for match in matches if not match.private] + public_matches.sort(key=len) + + for match in public_matches: + conflicting_matches = matches.conflicting(match) + + if conflicting_matches: + # keep the match only if it's the longest + conflicting_matches = [conflicting_match for conflicting_match in conflicting_matches if + not conflicting_match.private] + conflicting_matches.sort(key=len) + + for conflicting_match in conflicting_matches: + conflict_solvers = [(self.default_conflict_solver, False)] + + if match.conflict_solver: + conflict_solvers.append((match.conflict_solver, False)) + if conflicting_match.conflict_solver: + conflict_solvers.append((conflicting_match.conflict_solver, True)) + + for conflict_solver, reverse in reversed(conflict_solvers): + if reverse: + to_remove = conflict_solver(conflicting_match, match) + else: + to_remove = conflict_solver(match, conflicting_match) + if to_remove == DEFAULT: + continue + if to_remove and to_remove not in to_remove_matches: + both_matches = [match, conflicting_match] + both_matches.remove(to_remove) + to_keep = both_matches[0] + + if to_keep not in to_remove_matches: + log(self.log_level, "Conflicting match %s will be removed in favor of match %s", + to_remove, to_keep) + + to_remove_matches.add(to_remove) + break + return to_remove_matches + + +class PrivateRemover(Rule): + """ + Removes private matches rule. 
+ """ + priority = POST_PROCESS + + consequence = RemoveMatch + + def when(self, matches, context): + return [match for match in matches if match.private] diff --git a/lib/rebulk/rebulk.py b/lib/rebulk/rebulk.py new file mode 100755 index 00000000..a6a0fd2f --- /dev/null +++ b/lib/rebulk/rebulk.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Entry point functions and classes for Rebulk +""" +from logging import getLogger + +from .builder import Builder +from .match import Matches +from .processors import ConflictSolver, PrivateRemover +from .rules import Rules +from .utils import extend_safe + +log = getLogger(__name__).log + + +class Rebulk(Builder): + r""" + Regular expression, string and function based patterns are declared in a ``Rebulk`` object. It use a fluent API to + chain ``string``, ``regex``, and ``functional`` methods to define various patterns types. + + .. code-block:: python + + >>> from rebulk import Rebulk + >>> bulk = Rebulk().string('brown').regex(r'qu\w+').functional(lambda s: (20, 25)) + + When ``Rebulk`` object is fully configured, you can call ``matches`` method with an input string to retrieve all + ``Match`` objects found by registered pattern. + + .. code-block:: python + + >>> bulk.matches("The quick brown fox jumps over the lazy dog") + [<brown:(10, 15)>, <quick:(4, 9)>, <jumps:(20, 25)>] + + If multiple ``Match`` objects are found at the same position, only the longer one is kept. + + .. code-block:: python + + >>> bulk = Rebulk().string('lakers').string('la') + >>> bulk.matches("the lakers are from la") + [<lakers:(4, 10)>, <la:(20, 22)>] + """ + + # pylint:disable=protected-access + + def __init__(self, disabled=lambda context: False, default_rules=True): + """ + Creates a new Rebulk object. + :param disabled: if True, this pattern is disabled. Can also be a function(context). 
+ :type disabled: bool|function + :param default_rules: use default rules + :type default_rules: + :return: + :rtype: + """ + super(Rebulk, self).__init__() + if not callable(disabled): + self.disabled = lambda context: disabled + else: + self.disabled = disabled + self._patterns = [] + self._rules = Rules() + if default_rules: + self.rules(ConflictSolver, PrivateRemover) + self._rebulks = [] + + def pattern(self, *pattern): + """ + Add patterns objects + + :param pattern: + :type pattern: rebulk.pattern.Pattern + :return: self + :rtype: Rebulk + """ + self._patterns.extend(pattern) + return self + + def rules(self, *rules): + """ + Add rules as a module, class or instance. + :param rules: + :type rules: list[Rule] + :return: + """ + self._rules.load(*rules) + return self + + def rebulk(self, *rebulks): + """ + Add a children rebulk object + :param rebulks: + :type rebulks: Rebulk + :return: + """ + self._rebulks.extend(rebulks) + return self + + def matches(self, string, context=None): + """ + Search for all matches with current configuration against input_string + :param string: string to search into + :type string: str + :param context: context to use + :type context: dict + :return: A custom list of matches + :rtype: Matches + """ + matches = Matches(input_string=string) + if context is None: + context = {} + + self._matches_patterns(matches, context) + + self._execute_rules(matches, context) + + return matches + + def effective_rules(self, context=None): + """ + Get effective rules for this rebulk object and its children. + :param context: + :type context: + :return: + :rtype: + """ + rules = Rules() + rules.extend(self._rules) + for rebulk in self._rebulks: + if not rebulk.disabled(context): + extend_safe(rules, rebulk._rules) + return rules + + def _execute_rules(self, matches, context): + """ + Execute rules for this rebulk and children. 
+ :param matches: + :type matches: + :param context: + :type context: + :return: + :rtype: + """ + if not self.disabled(context): + rules = self.effective_rules(context) + rules.execute_all_rules(matches, context) + + def effective_patterns(self, context=None): + """ + Get effective patterns for this rebulk object and its children. + :param context: + :type context: + :return: + :rtype: + """ + patterns = list(self._patterns) + for rebulk in self._rebulks: + if not rebulk.disabled(context): + extend_safe(patterns, rebulk._patterns) + return patterns + + def _matches_patterns(self, matches, context): + """ + Search for all matches with current paterns agains input_string + :param matches: matches list + :type matches: Matches + :param context: context to use + :type context: dict + :return: + :rtype: + """ + if not self.disabled(context): + patterns = self.effective_patterns(context) + for pattern in patterns: + if not pattern.disabled(context): + pattern_matches = pattern.matches(matches.input_string, context) + if pattern_matches: + log(pattern.log_level, "Pattern has %s match(es). (%s)", len(pattern_matches), pattern) + else: + pass + # log(pattern.log_level, "Pattern doesn't match. (%s)" % (pattern,)) + for match in pattern_matches: + if match.marker: + log(pattern.log_level, "Marker found. (%s)", match) + matches.markers.append(match) + else: + log(pattern.log_level, "Match found. (%s)", match) + matches.append(match) + else: + log(pattern.log_level, "Pattern is disabled. 
(%s)", pattern) diff --git a/lib/rebulk/remodule.py b/lib/rebulk/remodule.py new file mode 100755 index 00000000..d1d68d19 --- /dev/null +++ b/lib/rebulk/remodule.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Uniform re module +""" +# pylint: disable-all +import os + +REGEX_AVAILABLE = False +if os.environ.get('REGEX_DISABLED') in ["1", "true", "True", "Y"]: + import re +else: + try: + import regex as re + REGEX_AVAILABLE = True + except ImportError: + import re diff --git a/lib/rebulk/rules.py b/lib/rebulk/rules.py new file mode 100755 index 00000000..2514904f --- /dev/null +++ b/lib/rebulk/rules.py @@ -0,0 +1,373 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Abstract rule class definition and rule engine implementation +""" +from abc import ABCMeta, abstractmethod +import inspect +from itertools import groupby +from logging import getLogger + +import six +from .utils import is_iterable + +from .toposort import toposort + +from . import debug + +log = getLogger(__name__).log + + +@six.add_metaclass(ABCMeta) +class Consequence(object): + """ + Definition of a consequence to apply. + """ + @abstractmethod + def then(self, matches, when_response, context): # pragma: no cover + """ + Action implementation. + + :param matches: + :type matches: rebulk.match.Matches + :param context: + :type context: + :param when_response: return object from when call. + :type when_response: object + :return: True if the action was runned, False if it wasn't. + :rtype: bool + """ + pass + + +@six.add_metaclass(ABCMeta) +class Condition(object): + """ + Definition of a condition to check. + """ + @abstractmethod + def when(self, matches, context): # pragma: no cover + """ + Condition implementation. + + :param matches: + :type matches: rebulk.match.Matches + :param context: + :type context: + :return: truthy if rule should be triggered and execute then action, falsy if it should not. 
+ :rtype: object + """ + pass + + +@six.add_metaclass(ABCMeta) +class CustomRule(Condition, Consequence): + """ + Definition of a rule to apply + """ + # pylint: disable=no-self-use, unused-argument, abstract-method + priority = 0 + name = None + dependency = None + properties = {} + + def __init__(self, log_level=None): + self.defined_at = debug.defined_at() + if log_level is None and not hasattr(self, 'log_level'): + self.log_level = debug.LOG_LEVEL + + def enabled(self, context): + """ + Disable rule. + + :param context: + :type context: + :return: True if rule is enabled, False if disabled + :rtype: bool + """ + return True + + def __lt__(self, other): + return self.priority > other.priority + + def __repr__(self): + defined = "" + if self.defined_at: + defined = "@%s" % (self.defined_at,) + return "<%s%s>" % (self.name if self.name else self.__class__.__name__, defined) + + def __eq__(self, other): + return self.__class__ == other.__class__ + + def __hash__(self): + return hash(self.__class__) + + +class Rule(CustomRule): + """ + Definition of a rule to apply + """ + # pylint:disable=abstract-method + consequence = None + + def then(self, matches, when_response, context): + assert self.consequence + if is_iterable(self.consequence): + if not is_iterable(when_response): + when_response = [when_response] + iterator = iter(when_response) + for cons in self.consequence: #pylint: disable=not-an-iterable + if inspect.isclass(cons): + cons = cons() + cons.then(matches, next(iterator), context) + else: + cons = self.consequence + if inspect.isclass(cons): + cons = cons() # pylint:disable=not-callable + cons.then(matches, when_response, context) + + +class RemoveMatch(Consequence): # pylint: disable=abstract-method + """ + Remove matches returned by then + """ + def then(self, matches, when_response, context): + if is_iterable(when_response): + ret = [] + when_response = list(when_response) + for match in when_response: + if match in matches: + matches.remove(match) + 
ret.append(match) + return ret + if when_response in matches: + matches.remove(when_response) + return when_response + + +class AppendMatch(Consequence): # pylint: disable=abstract-method + """ + Append matches returned by then + """ + def __init__(self, match_name=None): + self.match_name = match_name + + def then(self, matches, when_response, context): + if is_iterable(when_response): + ret = [] + when_response = list(when_response) + for match in when_response: + if match not in matches: + if self.match_name: + match.name = self.match_name + matches.append(match) + ret.append(match) + return ret + if self.match_name: + when_response.name = self.match_name + if when_response not in matches: + matches.append(when_response) + return when_response + + +class RenameMatch(Consequence): # pylint: disable=abstract-method + """ + Rename matches returned by then + """ + def __init__(self, match_name): + self.match_name = match_name + self.remove = RemoveMatch() + self.append = AppendMatch() + + def then(self, matches, when_response, context): + removed = self.remove.then(matches, when_response, context) + if is_iterable(removed): + removed = list(removed) + for match in removed: + match.name = self.match_name + elif removed: + removed.name = self.match_name + if removed: + self.append.then(matches, removed, context) + + +class AppendTags(Consequence): # pylint: disable=abstract-method + """ + Add tags to returned matches + """ + def __init__(self, tags): + self.tags = tags + self.remove = RemoveMatch() + self.append = AppendMatch() + + def then(self, matches, when_response, context): + removed = self.remove.then(matches, when_response, context) + if is_iterable(removed): + removed = list(removed) + for match in removed: + match.tags.extend(self.tags) + elif removed: + removed.tags.extend(self.tags) # pylint: disable=no-member + if removed: + self.append.then(matches, removed, context) + + +class RemoveTags(Consequence): # pylint: disable=abstract-method + """ + Remove 
tags from returned matches + """ + def __init__(self, tags): + self.tags = tags + self.remove = RemoveMatch() + self.append = AppendMatch() + + def then(self, matches, when_response, context): + removed = self.remove.then(matches, when_response, context) + if is_iterable(removed): + removed = list(removed) + for match in removed: + for tag in self.tags: + if tag in match.tags: + match.tags.remove(tag) + elif removed: + for tag in self.tags: + if tag in removed.tags: # pylint: disable=no-member + removed.tags.remove(tag) # pylint: disable=no-member + if removed: + self.append.then(matches, removed, context) + + +class Rules(list): + """ + list of rules ready to execute. + """ + + def __init__(self, *rules): + super(Rules, self).__init__() + self.load(*rules) + + def load(self, *rules): + """ + Load rules from a Rule module, class or instance + + :param rules: + :type rules: + :return: + :rtype: + """ + for rule in rules: + if inspect.ismodule(rule): + self.load_module(rule) + elif inspect.isclass(rule): + self.load_class(rule) + else: + self.append(rule) + + def load_module(self, module): + """ + Load a rules module + + :param module: + :type module: + :return: + :rtype: + """ + # pylint: disable=unused-variable + for name, obj in inspect.getmembers(module, + lambda member: hasattr(member, '__module__') + and member.__module__ == module.__name__ + and inspect.isclass): + self.load_class(obj) + + def load_class(self, class_): + """ + Load a Rule class. + + :param class_: + :type class_: + :return: + :rtype: + """ + self.append(class_()) + + def execute_all_rules(self, matches, context): + """ + Execute all rules from this rules list. All when condition with same priority will be performed before + calling then actions. 
+ + :param matches: + :type matches: + :param context: + :type context: + :return: + :rtype: + """ + ret = [] + for priority, priority_rules in groupby(sorted(self), lambda rule: rule.priority): + sorted_rules = toposort_rules(list(priority_rules)) # Group by dependency graph toposort + for rules_group in sorted_rules: + rules_group = list(sorted(rules_group, key=self.index)) # Sort rules group based on initial ordering. + group_log_level = None + for rule in rules_group: + if group_log_level is None or group_log_level < rule.log_level: + group_log_level = rule.log_level + log(group_log_level, "%s independent rule(s) at priority %s.", len(rules_group), priority) + for rule in rules_group: + when_response = execute_rule(rule, matches, context) + if when_response is not None: + ret.append((rule, when_response)) + + return ret + + +def execute_rule(rule, matches, context): + """ + Execute the given rule. + :param rule: + :type rule: + :param matches: + :type matches: + :param context: + :type context: + :return: + :rtype: + """ + if rule.enabled(context): + log(rule.log_level, "Checking rule condition: %s", rule) + when_response = rule.when(matches, context) + if when_response: + log(rule.log_level, "Rule was triggered: %s", when_response) + log(rule.log_level, "Running rule consequence: %s %s", rule, when_response) + rule.then(matches, when_response, context) + return when_response + else: + log(rule.log_level, "Rule is disabled: %s", rule) + +def toposort_rules(rules): + """ + Sort given rules using toposort with dependency parameter. 
+ :param rules: + :type rules: + :return: + :rtype: + """ + graph = {} + class_dict = {} + for rule in rules: + if rule.__class__ in class_dict: + raise ValueError("Duplicate class rules are not allowed: %s" % rule.__class__) + class_dict[rule.__class__] = rule + for rule in rules: + if not is_iterable(rule.dependency) and rule.dependency: + rule_dependencies = [rule.dependency] + else: + rule_dependencies = rule.dependency + dependencies = set() + if rule_dependencies: + for dependency in rule_dependencies: + if inspect.isclass(dependency): + dependency = class_dict.get(dependency) + if dependency: + dependencies.add(dependency) + graph[rule] = dependencies + return toposort(graph) diff --git a/lib/rebulk/toposort.py b/lib/rebulk/toposort.py new file mode 100755 index 00000000..2bcba9ae --- /dev/null +++ b/lib/rebulk/toposort.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2014 True Blade Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Original: +# - https://bitbucket.org/ericvsmith/toposort (1.4) +# Modifications: +# - merged Pull request #2 for CyclicDependency error +# - import reduce as original name +# - support python 2.6 dict comprehension + +# pylint: skip-file +from functools import reduce + + +class CyclicDependency(ValueError): + def __init__(self, cyclic): + s = 'Cyclic dependencies exist among these items: {0}'.format(', '.join(repr(x) for x in cyclic.items())) + super(CyclicDependency, self).__init__(s) + self.cyclic = cyclic + + +def toposort(data): + """ + Dependencies are expressed as a dictionary whose keys are items + and whose values are a set of dependent items. Output is a list of + sets in topological order. 
The first set consists of items with no + dependences, each subsequent set consists of items that depend upon + items in the preceeding sets. + :param data: + :type data: + :return: + :rtype: + """ + + # Special case empty input. + if len(data) == 0: + return + + # Copy the input so as to leave it unmodified. + data = data.copy() + + # Ignore self dependencies. + for k, v in data.items(): + v.discard(k) + # Find all items that don't depend on anything. + extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys()) + # Add empty dependences where needed. + data.update(dict((item, set()) for item in extra_items_in_deps)) + while True: + ordered = set(item for item, dep in data.items() if len(dep) == 0) + if not ordered: + break + yield ordered + data = dict((item, (dep - ordered)) + for item, dep in data.items() + if item not in ordered) + if len(data) != 0: + raise CyclicDependency(data) + + +def toposort_flatten(data, sort=True): + """ + Returns a single list of dependencies. For any set returned by + toposort(), those items are sorted and appended to the result (just to + make the results deterministic). + :param data: + :type data: + :param sort: + :type sort: + :return: Single list of dependencies. + :rtype: list + """ + + result = [] + for d in toposort(data): + result.extend((sorted if sort else list)(d)) + return result diff --git a/lib/rebulk/utils.py b/lib/rebulk/utils.py new file mode 100755 index 00000000..85ddd41e --- /dev/null +++ b/lib/rebulk/utils.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Various utilities functions +""" +try: + from collections.abc import MutableSet +except ImportError: + from collections import MutableSet + +from types import GeneratorType + + +def find_all(string, sub, start=None, end=None, ignore_case=False, **kwargs): + """ + Return all indices in string s where substring sub is + found, such that sub is contained in the slice s[start:end]. 
+ + >>> list(find_all('The quick brown fox jumps over the lazy dog', 'fox')) + [16] + + >>> list(find_all('The quick brown fox jumps over the lazy dog', 'mountain')) + [] + + >>> list(find_all('The quick brown fox jumps over the lazy dog', 'The')) + [0] + + >>> list(find_all( + ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', + ... 'an')) + [44, 51, 70] + + >>> list(find_all( + ... 'Carved symbols in a mountain hollow on the bank of an inlet irritated an eccentric person', + ... 'an', + ... 50, + ... 60)) + [51] + + :param string: the input string + :type string: str + :param sub: the substring + :type sub: str + :return: all indices in the input string + :rtype: __generator[str] + """ + #pylint: disable=unused-argument + if ignore_case: + sub = sub.lower() + string = string.lower() + while True: + start = string.find(sub, start, end) + if start == -1: + return + yield start + start += len(sub) + + +def get_first_defined(data, keys, default_value=None): + """ + Get the first defined key in data. + :param data: + :type data: + :param keys: + :type keys: + :param default_value: + :type default_value: + :return: + :rtype: + """ + for key in keys: + if key in data: + return data[key] + return default_value + + +def is_iterable(obj): + """ + Are we being asked to look up a list of things, instead of a single thing? + We check for the `__iter__` attribute so that this can cover types that + don't have to be known by this module, such as NumPy arrays. + + Strings, however, should be considered as atomic values to look up, not + iterables. + + We don't need to check for the Python 2 `unicode` type, because it doesn't + have an `__iter__` attribute anyway. + """ + # pylint: disable=consider-using-ternary + return hasattr(obj, '__iter__') and not isinstance(obj, str) or isinstance(obj, GeneratorType) + + +def extend_safe(target, source): + """ + Extends source list to target list only if elements doesn't exists in target list. 
+ :param target: + :type target: list + :param source: + :type source: list + """ + for elt in source: + if elt not in target: + target.append(elt) + + +class _Ref(object): + """ + Reference for IdentitySet + """ + def __init__(self, value): + self.value = value + + def __eq__(self, other): + return self.value is other.value + + def __hash__(self): + return id(self.value) + + +class IdentitySet(MutableSet): # pragma: no cover + """ + Set based on identity + """ + def __init__(self, items=None): # pylint: disable=super-init-not-called + if items is None: + items = [] + self.refs = set(map(_Ref, items)) + + def __contains__(self, elem): + return _Ref(elem) in self.refs + + def __iter__(self): + return (ref.value for ref in self.refs) + + def __len__(self): + return len(self.refs) + + def add(self, value): + self.refs.add(_Ref(value)) + + def discard(self, value): + self.refs.discard(_Ref(value)) + + def update(self, iterable): + """ + Update set with iterable + :param iterable: + :type iterable: + :return: + :rtype: + """ + for elem in iterable: + self.add(elem) + + def __repr__(self): # pragma: no cover + return "%s(%s)" % (type(self).__name__, list(self)) diff --git a/lib/rebulk/validators.py b/lib/rebulk/validators.py new file mode 100755 index 00000000..b8959c54 --- /dev/null +++ b/lib/rebulk/validators.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Validator functions to use in patterns. + +All those function have last argument as match, so it's possible to use functools.partial to bind previous arguments. +""" + + +def chars_before(chars, match): + """ + Validate the match if left character is in a given sequence. + + :param chars: + :type chars: + :param match: + :type match: + :return: + :rtype: + """ + if match.start <= 0: + return True + return match.input_string[match.start - 1] in chars + + +def chars_after(chars, match): + """ + Validate the match if right character is in a given sequence. 
+ + :param chars: + :type chars: + :param match: + :type match: + :return: + :rtype: + """ + if match.end >= len(match.input_string): + return True + return match.input_string[match.end] in chars + + +def chars_surround(chars, match): + """ + Validate the match if surrounding characters are in a given sequence. + + :param chars: + :type chars: + :param match: + :type match: + :return: + :rtype: + """ + return chars_before(chars, match) and chars_after(chars, match) + + +def validators(*chained_validators): + """ + Creates a validator chain from several validator functions. + + :param chained_validators: + :type chained_validators: + :return: + :rtype: + """ + + def validator_chain(match): # pylint:disable=missing-docstring + for chained_validator in chained_validators: + if not chained_validator(match): + return False + return True + + return validator_chain + + +def allways_true(match): # pylint:disable=unused-argument + """ + A validator which is allways true + :param match: + :return: + """ + return True diff --git a/lib/schedule.py b/lib/schedule.py new file mode 100644 index 00000000..67fb19d4 --- /dev/null +++ b/lib/schedule.py @@ -0,0 +1,617 @@ +""" +Python job scheduling for humans. + +github.com/dbader/schedule + +An in-process scheduler for periodic jobs that uses the builder pattern +for configuration. Schedule lets you run Python functions (or any other +callable) periodically at pre-determined intervals using a simple, +human-friendly syntax. + +Inspired by Addam Wiggins' article "Rethinking Cron" [1] and the +"clockwork" Ruby module [2][3]. + +Features: + - A simple to use API for scheduling jobs. + - Very lightweight and no external dependencies. + - Excellent test coverage. 
+ - Tested on Python 2.7, 3.5 and 3.6 + +Usage: + >>> import schedule + >>> import time + + >>> def job(message='stuff'): + >>> print("I'm working on:", message) + + >>> schedule.every(10).minutes.do(job) + >>> schedule.every(5).to(10).days.do(job) + >>> schedule.every().hour.do(job, message='things') + >>> schedule.every().day.at("10:30").do(job) + + >>> while True: + >>> schedule.run_pending() + >>> time.sleep(1) + +[1] https://adam.herokuapp.com/past/2010/4/13/rethinking_cron/ +[2] https://github.com/Rykian/clockwork +[3] https://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/ +""" +try: + from collections.abc import Hashable +except ImportError: + from collections import Hashable +import datetime +import functools +import logging +import random +import re +import time + +logger = logging.getLogger('schedule') + + +class ScheduleError(Exception): + """Base schedule exception""" + pass + + +class ScheduleValueError(ScheduleError): + """Base schedule value error""" + pass + + +class IntervalError(ScheduleValueError): + """An improper interval was used""" + pass + + +class CancelJob(object): + """ + Can be returned from a job to unschedule itself. + """ + pass + + +class Scheduler(object): + """ + Objects instantiated by the :class:`Scheduler <Scheduler>` are + factories to create jobs, keep record of scheduled jobs and + handle their execution. + """ + def __init__(self): + self.jobs = [] + + def run_pending(self): + """ + Run all jobs that are scheduled to run. + + Please note that it is *intended behavior that run_pending() + does not run missed jobs*. For example, if you've registered a job + that should run every minute and you only call run_pending() + in one hour increments then your job won't be run 60 times in + between but only once. 
+ """ + runnable_jobs = (job for job in self.jobs if job.should_run) + for job in sorted(runnable_jobs): + self._run_job(job) + + def run_all(self, delay_seconds=0): + """ + Run all jobs regardless if they are scheduled to run or not. + + A delay of `delay` seconds is added between each job. This helps + distribute system load generated by the jobs more evenly + over time. + + :param delay_seconds: A delay added between every executed job + """ + logger.info('Running *all* %i jobs with %is delay inbetween', + len(self.jobs), delay_seconds) + for job in self.jobs[:]: + self._run_job(job) + time.sleep(delay_seconds) + + def clear(self, tag=None): + """ + Deletes scheduled jobs marked with the given tag, or all jobs + if tag is omitted. + + :param tag: An identifier used to identify a subset of + jobs to delete + """ + if tag is None: + del self.jobs[:] + else: + self.jobs[:] = (job for job in self.jobs if tag not in job.tags) + + def cancel_job(self, job): + """ + Delete a scheduled job. + + :param job: The job to be unscheduled + """ + try: + self.jobs.remove(job) + except ValueError: + pass + + def every(self, interval=1): + """ + Schedule a new periodic job. + + :param interval: A quantity of a certain time unit + :return: An unconfigured :class:`Job <Job>` + """ + job = Job(interval, self) + return job + + def _run_job(self, job): + ret = job.run() + if isinstance(ret, CancelJob) or ret is CancelJob: + self.cancel_job(job) + + @property + def next_run(self): + """ + Datetime when the next job should run. + + :return: A :class:`~datetime.datetime` object + """ + if not self.jobs: + return None + return min(self.jobs).next_run + + @property + def idle_seconds(self): + """ + :return: Number of seconds until + :meth:`next_run <Scheduler.next_run>`. + """ + return (self.next_run - datetime.datetime.now()).total_seconds() + + +class Job(object): + """ + A periodic job as used by :class:`Scheduler`. 
+ + :param interval: A quantity of a certain time unit + :param scheduler: The :class:`Scheduler <Scheduler>` instance that + this job will register itself with once it has + been fully configured in :meth:`Job.do()`. + + Every job runs at a given fixed time interval that is defined by: + + * a :meth:`time unit <Job.second>` + * a quantity of `time units` defined by `interval` + + A job is usually created and returned by :meth:`Scheduler.every` + method, which also defines its `interval`. + """ + def __init__(self, interval, scheduler=None): + self.interval = interval # pause interval * unit between runs + self.latest = None # upper limit to the interval + self.job_func = None # the job job_func to run + self.unit = None # time units, e.g. 'minutes', 'hours', ... + self.at_time = None # optional time at which this job runs + self.last_run = None # datetime of the last run + self.next_run = None # datetime of the next run + self.period = None # timedelta between runs, only valid for + self.start_day = None # Specific day of the week to start on + self.tags = set() # unique set of tags for the job + self.scheduler = scheduler # scheduler to register with + + def __lt__(self, other): + """ + PeriodicJobs are sortable based on the scheduled time they + run next. 
+ """ + return self.next_run < other.next_run + + def __str__(self): + return ( + "Job(interval={}, " + "unit={}, " + "do={}, " + "args={}, " + "kwargs={})" + ).format(self.interval, + self.unit, + self.job_func.__name__, + self.job_func.args, + self.job_func.keywords) + + def __repr__(self): + def format_time(t): + return t.strftime('%Y-%m-%d %H:%M:%S') if t else '[never]' + + def is_repr(j): + return not isinstance(j, Job) + + timestats = '(last run: %s, next run: %s)' % ( + format_time(self.last_run), format_time(self.next_run)) + + if hasattr(self.job_func, '__name__'): + job_func_name = self.job_func.__name__ + else: + job_func_name = repr(self.job_func) + args = [repr(x) if is_repr(x) else str(x) for x in self.job_func.args] + kwargs = ['%s=%s' % (k, repr(v)) + for k, v in self.job_func.keywords.items()] + call_repr = job_func_name + '(' + ', '.join(args + kwargs) + ')' + + if self.at_time is not None: + return 'Every %s %s at %s do %s %s' % ( + self.interval, + self.unit[:-1] if self.interval == 1 else self.unit, + self.at_time, call_repr, timestats) + else: + fmt = ( + 'Every %(interval)s ' + + ('to %(latest)s ' if self.latest is not None else '') + + '%(unit)s do %(call_repr)s %(timestats)s' + ) + + return fmt % dict( + interval=self.interval, + latest=self.latest, + unit=(self.unit[:-1] if self.interval == 1 else self.unit), + call_repr=call_repr, + timestats=timestats + ) + + @property + def second(self): + if self.interval != 1: + raise IntervalError('Use seconds instead of second') + return self.seconds + + @property + def seconds(self): + self.unit = 'seconds' + return self + + @property + def minute(self): + if self.interval != 1: + raise IntervalError('Use minutes instead of minute') + return self.minutes + + @property + def minutes(self): + self.unit = 'minutes' + return self + + @property + def hour(self): + if self.interval != 1: + raise IntervalError('Use hours instead of hour') + return self.hours + + @property + def hours(self): + self.unit = 
'hours'
+        return self
+
+    @property
+    def day(self):
+        if self.interval != 1:
+            raise IntervalError('Use days instead of day')
+        return self.days
+
+    @property
+    def days(self):
+        self.unit = 'days'
+        return self
+
+    @property
+    def week(self):
+        if self.interval != 1:
+            raise IntervalError('Use weeks instead of week')
+        return self.weeks
+
+    @property
+    def weeks(self):
+        self.unit = 'weeks'
+        return self
+
+    @property
+    def monday(self):
+        if self.interval != 1:
+            raise IntervalError('Use mondays instead of monday')
+        self.start_day = 'monday'
+        return self.weeks
+
+    @property
+    def tuesday(self):
+        if self.interval != 1:
+            raise IntervalError('Use tuesdays instead of tuesday')
+        self.start_day = 'tuesday'
+        return self.weeks
+
+    @property
+    def wednesday(self):
+        if self.interval != 1:
+            raise IntervalError('Use wednesdays instead of wednesday')
+        self.start_day = 'wednesday'
+        return self.weeks
+
+    @property
+    def thursday(self):
+        if self.interval != 1:
+            raise IntervalError('Use thursdays instead of thursday')
+        self.start_day = 'thursday'
+        return self.weeks
+
+    @property
+    def friday(self):
+        if self.interval != 1:
+            raise IntervalError('Use fridays instead of friday')
+        self.start_day = 'friday'
+        return self.weeks
+
+    @property
+    def saturday(self):
+        if self.interval != 1:
+            raise IntervalError('Use saturdays instead of saturday')
+        self.start_day = 'saturday'
+        return self.weeks
+
+    @property
+    def sunday(self):
+        if self.interval != 1:
+            raise IntervalError('Use sundays instead of sunday')
+        self.start_day = 'sunday'
+        return self.weeks
+
+    def tag(self, *tags):
+        """
+        Tags the job with one or more unique identifiers.
+
+        Tags must be hashable. Duplicate tags are discarded.
+
+        :param tags: A unique list of ``Hashable`` tags. 
+ :return: The invoked job instance + """ + if not all(isinstance(tag, Hashable) for tag in tags): + raise TypeError('Tags must be hashable') + self.tags.update(tags) + return self + + def at(self, time_str): + """ + Specify a particular time that the job should be run at. + + :param time_str: A string in one of the following formats: `HH:MM:SS`, + `HH:MM`,`:MM`, `:SS`. The format must make sense given how often + the job is repeating; for example, a job that repeats every minute + should not be given a string in the form `HH:MM:SS`. The difference + between `:MM` and `:SS` is inferred from the selected time-unit + (e.g. `every().hour.at(':30')` vs. `every().minute.at(':30')`). + :return: The invoked job instance + """ + if (self.unit not in ('days', 'hours', 'minutes') + and not self.start_day): + raise ScheduleValueError('Invalid unit') + if not isinstance(time_str, str): + raise TypeError('at() should be passed a string') + if self.unit == 'days' or self.start_day: + if not re.match(r'^([0-2]\d:)?[0-5]\d:[0-5]\d$', time_str): + raise ScheduleValueError('Invalid time format') + if self.unit == 'hours': + if not re.match(r'^([0-5]\d)?:[0-5]\d$', time_str): + raise ScheduleValueError(('Invalid time format for' + ' an hourly job')) + if self.unit == 'minutes': + if not re.match(r'^:[0-5]\d$', time_str): + raise ScheduleValueError(('Invalid time format for' + ' a minutely job')) + time_values = time_str.split(':') + if len(time_values) == 3: + hour, minute, second = time_values + elif len(time_values) == 2 and self.unit == 'minutes': + hour = 0 + minute = 0 + _, second = time_values + else: + hour, minute = time_values + second = 0 + if self.unit == 'days' or self.start_day: + hour = int(hour) + if not (0 <= hour <= 23): + raise ScheduleValueError('Invalid number of hours') + elif self.unit == 'hours': + hour = 0 + elif self.unit == 'minutes': + hour = 0 + minute = 0 + minute = int(minute) + second = int(second) + self.at_time = datetime.time(hour, minute, second) + 
return self + + def to(self, latest): + """ + Schedule the job to run at an irregular (randomized) interval. + + The job's interval will randomly vary from the value given + to `every` to `latest`. The range defined is inclusive on + both ends. For example, `every(A).to(B).seconds` executes + the job function every N seconds such that A <= N <= B. + + :param latest: Maximum interval between randomized job runs + :return: The invoked job instance + """ + self.latest = latest + return self + + def do(self, job_func, *args, **kwargs): + """ + Specifies the job_func that should be called every time the + job runs. + + Any additional arguments are passed on to job_func when + the job runs. + + :param job_func: The function to be scheduled + :return: The invoked job instance + """ + self.job_func = functools.partial(job_func, *args, **kwargs) + try: + functools.update_wrapper(self.job_func, job_func) + except AttributeError: + # job_funcs already wrapped by functools.partial won't have + # __name__, __module__ or __doc__ and the update_wrapper() + # call will fail. + pass + self._schedule_next_run() + self.scheduler.jobs.append(self) + return self + + @property + def should_run(self): + """ + :return: ``True`` if the job should be run now. + """ + return datetime.datetime.now() >= self.next_run + + def run(self): + """ + Run the job and immediately reschedule it. + + :return: The return value returned by the `job_func` + """ + logger.info('Running job %s', self) + ret = self.job_func() + self.last_run = datetime.datetime.now() + self._schedule_next_run() + return ret + + def _schedule_next_run(self): + """ + Compute the instant when this job should run next. 
+ """ + if self.unit not in ('seconds', 'minutes', 'hours', 'days', 'weeks'): + raise ScheduleValueError('Invalid unit') + + if self.latest is not None: + if not (self.latest >= self.interval): + raise ScheduleError('`latest` is greater than `interval`') + interval = random.randint(self.interval, self.latest) + else: + interval = self.interval + + self.period = datetime.timedelta(**{self.unit: interval}) + self.next_run = datetime.datetime.now() + self.period + if self.start_day is not None: + if self.unit != 'weeks': + raise ScheduleValueError('`unit` should be \'weeks\'') + weekdays = ( + 'monday', + 'tuesday', + 'wednesday', + 'thursday', + 'friday', + 'saturday', + 'sunday' + ) + if self.start_day not in weekdays: + raise ScheduleValueError('Invalid start day') + weekday = weekdays.index(self.start_day) + days_ahead = weekday - self.next_run.weekday() + if days_ahead <= 0: # Target day already happened this week + days_ahead += 7 + self.next_run += datetime.timedelta(days_ahead) - self.period + if self.at_time is not None: + if (self.unit not in ('days', 'hours', 'minutes') + and self.start_day is None): + raise ScheduleValueError(('Invalid unit without' + ' specifying start day')) + kwargs = { + 'second': self.at_time.second, + 'microsecond': 0 + } + if self.unit == 'days' or self.start_day is not None: + kwargs['hour'] = self.at_time.hour + if self.unit in ['days', 'hours'] or self.start_day is not None: + kwargs['minute'] = self.at_time.minute + self.next_run = self.next_run.replace(**kwargs) + # If we are running for the first time, make sure we run + # at the specified time *today* (or *this hour*) as well + if not self.last_run: + now = datetime.datetime.now() + if (self.unit == 'days' and self.at_time > now.time() and + self.interval == 1): + self.next_run = self.next_run - datetime.timedelta(days=1) + elif self.unit == 'hours' \ + and self.at_time.minute > now.minute \ + or (self.at_time.minute == now.minute + and self.at_time.second > now.second): + 
self.next_run = self.next_run - datetime.timedelta(hours=1) + elif self.unit == 'minutes' \ + and self.at_time.second > now.second: + self.next_run = self.next_run - \ + datetime.timedelta(minutes=1) + if self.start_day is not None and self.at_time is not None: + # Let's see if we will still make that time we specified today + if (self.next_run - datetime.datetime.now()).days >= 7: + self.next_run -= self.period + + +# The following methods are shortcuts for not having to +# create a Scheduler instance: + +#: Default :class:`Scheduler <Scheduler>` object +default_scheduler = Scheduler() + +#: Default :class:`Jobs <Job>` list +jobs = default_scheduler.jobs # todo: should this be a copy, e.g. jobs()? + + +def every(interval=1): + """Calls :meth:`every <Scheduler.every>` on the + :data:`default scheduler instance <default_scheduler>`. + """ + return default_scheduler.every(interval) + + +def run_pending(): + """Calls :meth:`run_pending <Scheduler.run_pending>` on the + :data:`default scheduler instance <default_scheduler>`. + """ + default_scheduler.run_pending() + + +def run_all(delay_seconds=0): + """Calls :meth:`run_all <Scheduler.run_all>` on the + :data:`default scheduler instance <default_scheduler>`. + """ + default_scheduler.run_all(delay_seconds=delay_seconds) + + +def clear(tag=None): + """Calls :meth:`clear <Scheduler.clear>` on the + :data:`default scheduler instance <default_scheduler>`. + """ + default_scheduler.clear(tag) + + +def cancel_job(job): + """Calls :meth:`cancel_job <Scheduler.cancel_job>` on the + :data:`default scheduler instance <default_scheduler>`. + """ + default_scheduler.cancel_job(job) + + +def next_run(): + """Calls :meth:`next_run <Scheduler.next_run>` on the + :data:`default scheduler instance <default_scheduler>`. + """ + return default_scheduler.next_run + + +def idle_seconds(): + """Calls :meth:`idle_seconds <Scheduler.idle_seconds>` on the + :data:`default scheduler instance <default_scheduler>`. 
+ """ + return default_scheduler.idle_seconds \ No newline at end of file diff --git a/lib/torrentool/__init__.py b/lib/torrentool/__init__.py new file mode 100644 index 00000000..f7f572ba --- /dev/null +++ b/lib/torrentool/__init__.py @@ -0,0 +1 @@ +VERSION = (1, 0, 2) \ No newline at end of file diff --git a/lib/torrentool/api.py b/lib/torrentool/api.py new file mode 100644 index 00000000..baac4b3b --- /dev/null +++ b/lib/torrentool/api.py @@ -0,0 +1,7 @@ +""" +Exposes commonly used classes and functions. + +""" +from .bencode import Bencode +from .torrent import Torrent +from .utils import upload_to_cache_server, get_open_trackers_from_local, get_open_trackers_from_remote diff --git a/lib/torrentool/bencode.py b/lib/torrentool/bencode.py new file mode 100644 index 00000000..6d431d13 --- /dev/null +++ b/lib/torrentool/bencode.py @@ -0,0 +1,204 @@ +from collections import OrderedDict +from operator import itemgetter +from codecs import encode +from sys import version_info + +from .exceptions import BencodeDecodingError, BencodeEncodingError + + +PY3 = version_info >= (3, 0) + +if PY3: + str_type = str + byte_types = (bytes, bytearray) + chr_ = chr + int_types = int +else: + str_type = basestring + byte_types = bytes + chr_ = lambda ch: ch + int_types = (int, long) + + +class Bencode(object): + """Exposes utilities for bencoding.""" + + @classmethod + def encode(cls, value): + """Encodes a value into bencoded bytes. + + :param value: Python object to be encoded (str, int, list, dict). + :param str val_encoding: Encoding used by strings in a given object. 
+ :rtype: bytes + """ + val_encoding = 'utf-8' + + def encode_str(v): + try: + v_enc = encode(v, val_encoding) + + except UnicodeDecodeError: + if PY3: + raise + else: + # Suppose bytestring + v_enc = v + + prefix = encode('%s:' % len(v_enc), val_encoding) + return prefix + v_enc + + def encode_(val): + if isinstance(val, str_type): + result = encode_str(val) + + elif isinstance(val, int_types): + result = encode(('i%se' % val), val_encoding) + + elif isinstance(val, (list, set, tuple)): + result = encode('l', val_encoding) + for item in val: + result += encode_(item) + result += encode('e', val_encoding) + + elif isinstance(val, dict): + result = encode('d', val_encoding) + + # Dictionaries are expected to be sorted by key. + for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items(): + result += (encode_str(k) + encode_(v)) + + result += encode('e', val_encoding) + + elif isinstance(val, byte_types): + result = encode('%s:' % len(val), val_encoding) + result += val + + else: + raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val)) + + return result + + return encode_(value) + + @classmethod + def decode(cls, encoded): + """Decodes bencoded data introduced as bytes. + + Returns decoded structure(s). + + :param bytes encoded: + """ + def create_dict(items): + # Let's guarantee that dictionaries are sorted. 
+ k_v_pair = zip(*[iter(items)] * 2) + return OrderedDict(sorted(k_v_pair, key=itemgetter(0))) + + def create_list(items): + return list(items) + + stack_items = [] + stack_containers = [] + + def compress_stack(): + target_container = stack_containers.pop() + subitems = [] + + while True: + subitem = stack_items.pop() + subitems.append(subitem) + if subitem is target_container: + break + + container_creator = subitems.pop() + container = container_creator(reversed(subitems)) + stack_items.append(container) + + def parse_forward(till_char, sequence): + number = '' + char_sub_idx = 0 + + for char_sub_idx, char_sub in enumerate(sequence): + char_sub = chr_(char_sub) + if char_sub == till_char: + break + + number += char_sub + + number = int(number or 0) + char_sub_idx += 1 + + return number, char_sub_idx + + while encoded: + char = encoded[0] + char = chr_(char) + + if char == 'd': # Dictionary + stack_items.append(create_dict) + stack_containers.append(create_dict) + encoded = encoded[1:] + + elif char == 'l': # List + stack_items.append(create_list) + stack_containers.append(create_list) + encoded = encoded[1:] + + elif char == 'i': # Integer + number, char_sub_idx = parse_forward('e', encoded[1:]) + char_sub_idx += 1 + + stack_items.append(number) + encoded = encoded[char_sub_idx:] + + elif char.isdigit(): # String + str_len, char_sub_idx = parse_forward(':', encoded) + last_char_idx = char_sub_idx + str_len + + string = encoded[char_sub_idx:last_char_idx] + try: + string = string.decode('utf-8') + except UnicodeDecodeError: + # Considered bytestring (e.g. `pieces` hashes concatenation). + pass + + stack_items.append(string) + encoded = encoded[last_char_idx:] + + elif char == 'e': # End of a dictionary or a list. + compress_stack() + encoded = encoded[1:] + + else: + raise BencodeDecodingError('Unable to interpret `%s` char.' 
% char) + + if len(stack_items) == 1: + stack_items = stack_items.pop() + + return stack_items + + @classmethod + def read_string(cls, string): + """Decodes a given bencoded string or bytestring. + + Returns decoded structure(s). + + :param str string: + :rtype: list + """ + if PY3 and not isinstance(string, byte_types): + string = string.encode() + + return cls.decode(string) + + @classmethod + def read_file(cls, filepath): + """Decodes bencoded data of a given file. + + Returns decoded structure(s). + + :param str filepath: + :rtype: list + """ + with open(filepath, mode='rb') as f: + contents = f.read() + return cls.decode(contents) diff --git a/lib/torrentool/cli.py b/lib/torrentool/cli.py new file mode 100644 index 00000000..d07a8ee3 --- /dev/null +++ b/lib/torrentool/cli.py @@ -0,0 +1,94 @@ +from __future__ import division +import click +from os import path, getcwd + +from . import VERSION +from .api import Torrent +from .utils import humanize_filesize, upload_to_cache_server, get_open_trackers_from_remote, \ + get_open_trackers_from_local +from .exceptions import RemoteUploadError, RemoteDownloadError + + +@click.group() +@click.version_option(version='.'.join(map(str, VERSION))) +def start(): + """Torrentool command line utilities.""" + + +@start.group() +def torrent(): + """Torrent-related commands.""" + + +@torrent.command() +@click.argument('torrent_path', type=click.Path(exists=True, writable=False, dir_okay=False)) +def info(torrent_path): + """Print out information from .torrent file.""" + + my_torrent = Torrent.from_file(torrent_path) + + size = my_torrent.total_size + + click.secho('Name: %s' % my_torrent.name, fg='blue') + click.secho('Files:') + for file_tuple in my_torrent.files: + click.secho(file_tuple.name) + + click.secho('Hash: %s' % my_torrent.info_hash, fg='blue') + click.secho('Size: %s (%s)' % (humanize_filesize(size), size), fg='blue') + click.secho('Magnet: %s' % my_torrent.get_magnet(), fg='yellow') + + +@torrent.command() 
+@click.argument('source', type=click.Path(exists=True, writable=False)) +@click.option('--dest', default=getcwd, type=click.Path(file_okay=False), help='Destination path to put .torrent file into. Default: current directory.') +@click.option('--tracker', default=None, help='Tracker announce URL (multiple comma-separated values supported).') +@click.option('--open_trackers', default=False, is_flag=True, help='Add open trackers announce URLs.') +@click.option('--comment', default=None, help='Arbitrary comment.') +@click.option('--cache', default=False, is_flag=True, help='Upload file to torrent cache services.') +def create(source, dest, tracker, open_trackers, comment, cache): + """Create torrent file from a single file or a directory.""" + + source_title = path.basename(source).replace('.', '_').replace(' ', '_') + dest = '%s.torrent' % path.join(dest, source_title) + + click.secho('Creating torrent from %s ...' % source) + + my_torrent = Torrent.create_from(source) + + if comment: + my_torrent.comment = comment + + urls = [] + + if tracker: + urls = tracker.split(',') + + if open_trackers: + click.secho('Fetching an up-to-date open tracker list ...') + try: + urls.extend(get_open_trackers_from_remote()) + except RemoteDownloadError: + click.secho('Failed. 
Using built-in open tracker list.', fg='red', err=True) + urls.extend(get_open_trackers_from_local()) + + if urls: + my_torrent.announce_urls = urls + + my_torrent.to_file(dest) + + click.secho('Torrent file created: %s' % dest, fg='green') + click.secho('Torrent info hash: %s' % my_torrent.info_hash, fg='blue') + + if cache: + click.secho('Uploading to %s torrent cache service ...') + try: + result = upload_to_cache_server(dest) + click.secho('Cached torrent URL: %s' % result, fg='yellow') + + except RemoteUploadError as e: + click.secho('Failed: %s' % e, fg='red', err=True) + + +def main(): + start(obj={}) diff --git a/lib/torrentool/exceptions.py b/lib/torrentool/exceptions.py new file mode 100644 index 00000000..53118482 --- /dev/null +++ b/lib/torrentool/exceptions.py @@ -0,0 +1,27 @@ + +class TorrentoolException(Exception): + """Base torrentool exception. All others are inherited from it.""" + + +class BencodeError(TorrentoolException): + """Base exception for bencode related errors.""" + + +class BencodeDecodingError(BencodeError): + """Raised when torrentool is unable to decode bencoded data.""" + + +class BencodeEncodingError(BencodeError): + """Raised when torrentool is unable to encode data into bencode.""" + + +class TorrentError(TorrentoolException): + """Base exception for Torrent object related errors.""" + + +class RemoteUploadError(TorrentoolException): + """Base class for upload to remotes related issues.""" + + +class RemoteDownloadError(TorrentoolException): + """Base class for issues related to downloads from remotes.""" diff --git a/lib/torrentool/repo/open_trackers.ini b/lib/torrentool/repo/open_trackers.ini new file mode 100644 index 00000000..c0ca0aa0 --- /dev/null +++ b/lib/torrentool/repo/open_trackers.ini @@ -0,0 +1,8 @@ +udp://tracker.coppersurfer.tk:6969/announce +udp://tracker.internetwarriors.net:1337/announce +udp://tracker.leechers-paradise.org:6969/announce +udp://tracker.opentrackr.org:1337/announce 
+udp://tracker.openbittorrent.com:80/announce +udp://tracker.sktorrent.net:6969/announce +udp://tracker.zer0day.to:1337/announce +udp://exodus.desync.com:6969/announce diff --git a/lib/torrentool/torrent.py b/lib/torrentool/torrent.py new file mode 100644 index 00000000..8472c8cb --- /dev/null +++ b/lib/torrentool/torrent.py @@ -0,0 +1,436 @@ +from calendar import timegm +from collections import namedtuple +from datetime import datetime +from functools import reduce +from hashlib import sha1 +from os import walk, sep +from os.path import join, isdir, getsize, normpath, basename + +try: + from urllib.parse import urlencode +except ImportError: # Py2 + from urllib import urlencode + +from .bencode import Bencode +from .exceptions import TorrentError +from .utils import get_app_version + + +_ITERABLE_TYPES = (list, tuple, set) + + +TorrentFile = namedtuple('TorrentFile', ['name', 'length']) + + +class Torrent(object): + """Represents a torrent file, and exposes utilities to work with it.""" + + _filepath = None + + def __init__(self, dict_struct=None): + dict_struct = dict_struct or {'info': {}} + self._struct = dict_struct + + def __str__(self): + return 'Torrent: %s' % self.name + + announce_urls = property() + """List of lists of tracker announce URLs.""" + + comment = property() + """Optional. Free-form textual comments of the author.""" + + creation_date = property() + """Optional. The creation time of the torrent, in standard UNIX epoch format. UTC.""" + + created_by = property() + """Optional. Name and version of the program used to create the .torrent""" + + private = property() + """Optional. If True the client MUST publish its presence to get other peers + ONLY via the trackers explicitly described in the metainfo file. If False or is not present, + the client may obtain peer from other means, e.g. PEX peer exchange, dht. 
+ + """ + + name = property() + """Torrent name (title).""" + + webseeds = property() + """A list of URLs where torrent data can be retrieved. + + See also: Torrent.httpseeds + + http://bittorrent.org/beps/bep_0019.html + """ + + httpseeds = property() + """A list of URLs where torrent data can be retrieved. + + See also and prefer Torrent.webseeds + + http://bittorrent.org/beps/bep_0017.html + """ + + def _list_getter(self, key): + return self._struct.get(key, []) + + def _list_setter(self, key, val): + if val is None: + try: + del self._struct[key] + return + except KeyError: + return + + if not isinstance(val, _ITERABLE_TYPES): + val = [val] + + self._struct[key] = val + + @webseeds.getter + def webseeds(self): + return self._list_getter('url-list') + + @webseeds.setter + def webseeds(self, val): + self._list_setter('url-list', val) + + @httpseeds.getter + def httpseeds(self): + return self._list_getter('httpseeds') + + @httpseeds.setter + def httpseeds(self, val): + self._list_setter('httpseeds', val) + + @property + def files(self): + """Files in torrent. + + List of namedtuples (filepath, size). + + :rtype: list[TorrentFile] + """ + files = [] + info = self._struct.get('info') + + if not info: + return files + + if 'files' in info: + base = info['name'] + + for f in info['files']: + files.append(TorrentFile(join(base, *f['path']), f['length'])) + + else: + files.append(TorrentFile(info['name'], info['length'])) + + return files + + @property + def total_size(self): + """Total size of all files in torrent.""" + return reduce(lambda prev, curr: prev + curr[1], self.files, 0) + + @property + def info_hash(self): + """Hash of torrent file info section. 
Also known as torrent hash.""" + info = self._struct.get('info') + + if not info: + return None + + return sha1(Bencode.encode(info)).hexdigest() + + @property + def magnet_link(self): + """Magnet link using BTIH (BitTorrent Info Hash) URN.""" + return self.get_magnet(detailed=False) + + @announce_urls.getter + def announce_urls(self): + """List of lists of announce (tracker) URLs. + + First inner list is considered as primary announcers list, + the following lists as back-ups. + + http://bittorrent.org/beps/bep_0012.html + + """ + urls = self._struct.get('announce-list') + + if not urls: + urls = self._struct.get('announce') + if not urls: + return [] + urls = [[urls]] + + return urls + + @announce_urls.setter + def announce_urls(self, val): + self._struct['announce'] = '' + self._struct['announce-list'] = [] + + def set_single(val): + del self._struct['announce-list'] + self._struct['announce'] = val + + if isinstance(val, _ITERABLE_TYPES): + length = len(val) + + if length: + if length == 1: + set_single(val[0]) + else: + for item in val: + if not isinstance(item, _ITERABLE_TYPES): + item = [item] + self._struct['announce-list'].append(item) + self._struct['announce'] = val[0] + + else: + set_single(val) + + @comment.getter + def comment(self): + return self._struct.get('comment') + + @comment.setter + def comment(self, val): + self._struct['comment'] = val + + @creation_date.getter + def creation_date(self): + date = self._struct.get('creation date') + if date is not None: + date = datetime.utcfromtimestamp(int(date)) + return date + + @creation_date.setter + def creation_date(self, val): + self._struct['creation date'] = timegm(val.timetuple()) + + @created_by.getter + def created_by(self): + return self._struct.get('created by') + + @created_by.setter + def created_by(self, val): + self._struct['created by'] = val + + @private.getter + def private(self): + return self._struct.get('info', {}).get('private', False) + + @private.setter + def private(self, val): 
+        if not val:
+            try:
+                del self._struct['info']['private']
+            except KeyError:
+                pass
+        else:
+            self._struct['info']['private'] = 1
+
+    @name.getter
+    def name(self):
+        return self._struct.get('info', {}).get('name', None)
+
+    @name.setter
+    def name(self, val):
+        self._struct['info']['name'] = val
+
+    def get_magnet(self, detailed=True):
+        """Returns torrent magnet link, consisting of BTIH (BitTorrent Info Hash) URN
+        and optional other information.
+
+        :param bool|list|tuple|set detailed:
+            For boolean - whether additional info (such as trackers) should be included.
+            For iterable - expected allowed parameter names:
+                tr - trackers
+                ws - webseeds
+
+        """
+        result = 'magnet:?xt=urn:btih:' + self.info_hash
+
+        def add_tr():
+            urls = self.announce_urls
+            if not urls:
+                return
+
+            trackers = []
+
+            urls = urls[0]  # Only primary announcers are enough.
+            for url in urls:
+                trackers.append(('tr', url))
+
+            if trackers:
+                return urlencode(trackers)
+
+        def add_ws():
+            webseeds = [('ws', url) for url in self.webseeds]
+            if webseeds:
+                return urlencode(webseeds)
+
+        params_map = {
+            'tr': add_tr,
+            'ws': add_ws,
+        }
+
+        if detailed:
+            details = []
+
+            if isinstance(detailed, _ITERABLE_TYPES):
+                requested_params = detailed
+            else:
+                requested_params = params_map.keys()
+
+            for param in requested_params:
+                param_val = params_map[param]()
+                param_val and details.append(param_val)
+
+            if details:
+                result += '&%s' % '&'.join(details)
+
+        return result
+
+    def to_file(self, filepath=None):
+        """Writes Torrent object into a file, either to the given filepath
+        or to the file the object was previously read from or written to.
+
+        :param filepath:
+        """
+        if filepath is None and self._filepath is None:
+            raise TorrentError('Unable to save torrent to file: no filepath supplied.')
+
+        if filepath is not None:
+            self._filepath = filepath
+
+        with open(self._filepath, mode='wb') as f:
+            f.write(self.to_string())
+
+    def to_string(self):
+        """Returns bytes representing torrent file.
+ 
+ :rtype: bytearray + """ + return Bencode.encode(self._struct) + + @classmethod + def _get_target_files_info(cls, src_path): + src_path = u'%s' % src_path # Force walk() to return unicode names. + + is_dir = isdir(src_path) + target_files = [] + + if is_dir: + for base, _, files in walk(src_path): + target_files.extend([join(base, fname) for fname in sorted(files)]) + + else: + target_files.append(src_path) + + target_files_ = [] + total_size = 0 + for fpath in target_files: + file_size = getsize(fpath) + if not file_size: + continue + target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep))) + total_size += file_size + + return target_files_, total_size + + @classmethod + def create_from(cls, src_path): + """Returns Torrent object created from a file or a directory. + + :param str src_path: + :rtype: Torrent + """ + is_dir = isdir(src_path) + target_files, size_data = cls._get_target_files_info(src_path) + + SIZE_MIN = 32768 # 32 KiB + SIZE_DEFAULT = 262144 # 256 KiB + SIZE_MAX = 1048576 # 1 MiB + + CHUNKS_MIN = 1000 # todo use those limits as advised + CHUNKS_MAX = 2200 + + size_piece = SIZE_MIN + if size_data > SIZE_MIN: + size_piece = SIZE_DEFAULT + + if size_piece > SIZE_MAX: + size_piece = SIZE_MAX + + def read(filepath): + with open(filepath, 'rb') as f: + while True: + chunk = f.read(size_piece - len(pieces_buffer)) + chunk_size = len(chunk) + if chunk_size == 0: + break + yield chunk + + pieces = bytearray() + pieces_buffer = bytearray() + + for fpath, _, _ in target_files: + for chunk in read(fpath): + pieces_buffer += chunk + + if len(pieces_buffer) == size_piece: + pieces += sha1(pieces_buffer).digest()[:20] + pieces_buffer = bytearray() + + if len(pieces_buffer): + pieces += sha1(pieces_buffer).digest()[:20] + pieces_buffer = bytearray() + + info = { + 'name': basename(src_path), + 'pieces': bytes(pieces), + 'piece length': size_piece, + } + + if is_dir: + files = [] + + for _, length, path in target_files: 
+ files.append({'length': length, 'path': path}) + + info['files'] = files + + else: + info['length'] = target_files[0][1] + + torrent = cls({'info': info}) + torrent.created_by = get_app_version() + torrent.creation_date = datetime.utcnow() + + return torrent + + @classmethod + def from_string(cls, string): + """Alternative constructor to get Torrent object from string. + + :param str string: + :rtype: Torrent + """ + return cls(Bencode.read_string(string)) + + @classmethod + def from_file(cls, filepath): + """Alternative constructor to get Torrent object from file. + + :param str filepath: + :rtype: Torrent + """ + torrent = cls(Bencode.read_file(filepath)) + torrent._filepath = filepath + return torrent diff --git a/lib/torrentool/utils.py b/lib/torrentool/utils.py new file mode 100644 index 00000000..7d346cd7 --- /dev/null +++ b/lib/torrentool/utils.py @@ -0,0 +1,91 @@ +import math +from os import path + +from .exceptions import RemoteUploadError, RemoteDownloadError + + +OPEN_TRACKERS_FILENAME = 'open_trackers.ini' +REMOTE_TIMEOUT = 4 + + +def get_app_version(): + """Returns full version string including application name + suitable for putting into Torrent.created_by. + + """ + from torrentool import VERSION + return 'torrentool/%s' % '.'.join(map(str, VERSION)) + + +def humanize_filesize(bytes_size): + """Returns human readable filesize. + + :param int bytes_size: + :rtype: str + """ + if not bytes_size: + return '0 B' + + names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB') + + name_idx = int(math.floor(math.log(bytes_size, 1024))) + size = round(bytes_size / math.pow(1024, name_idx), 2) + + return '%s %s' % (size, names[name_idx]) + + +def upload_to_cache_server(fpath): + """Uploads .torrent file to a cache server. + + Returns upload file URL. 
+ + :rtype: str + """ + url_base = 'http://torrage.info' + url_upload = '%s/autoupload.php' % url_base + url_download = '%s/torrent.php?h=' % url_base + file_field = 'torrent' + + try: + import requests + + response = requests.post(url_upload, files={file_field: open(fpath, 'rb')}, timeout=REMOTE_TIMEOUT) + response.raise_for_status() + + info_cache = response.text + return url_download + info_cache + + except (ImportError, requests.RequestException) as e: + + # Now trace is lost. `raise from` to consider. + raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e)) + + +def get_open_trackers_from_remote(): + """Returns open trackers announce URLs list from remote repo.""" + + url_base = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo' + url = '%s/%s' % (url_base, OPEN_TRACKERS_FILENAME) + + try: + import requests + + response = requests.get(url, timeout=REMOTE_TIMEOUT) + response.raise_for_status() + + open_trackers = response.text.splitlines() + + except (ImportError, requests.RequestException) as e: + + # Now trace is lost. `raise from` to consider. 
+ raise RemoteDownloadError('Unable to download from %s: %s' % (url, e)) + + return open_trackers + + +def get_open_trackers_from_local(): + """Returns open trackers announce URLs list from local backup.""" + with open(path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)) as f: + open_trackers = map(str.strip, f.readlines()) + + return list(open_trackers) diff --git a/platformcode/config.py b/platformcode/config.py index f8df53a7..e94b0dc8 100644 --- a/platformcode/config.py +++ b/platformcode/config.py @@ -189,65 +189,7 @@ def get_all_settings_addon(): def open_settings(): - settings_pre = get_all_settings_addon() __settings__.openSettings() - settings_post = get_all_settings_addon() - - # cb_validate_config (util para validar cambios realizados en el cuadro de dialogo) - if settings_post.get('adult_aux_intro_password', None): - # Hemos accedido a la seccion de Canales para adultos - from platformcode import platformtools - if 'adult_password' not in settings_pre: - adult_password = set_setting('adult_password', '0000') - else: - adult_password = settings_pre['adult_password'] - - if settings_post['adult_aux_intro_password'] == adult_password: - # La contraseña de acceso es correcta - - # Cambio de contraseña - if settings_post['adult_aux_new_password1']: - if settings_post['adult_aux_new_password1'] == settings_post['adult_aux_new_password2']: - set_setting('adult_password', settings_post['adult_aux_new_password1']) - else: - platformtools.dialog_ok(get_localized_string(60305), - get_localized_string(60306), - get_localized_string(60307)) - - else: - platformtools.dialog_ok(get_localized_string(60305), get_localized_string(60309), - get_localized_string(60310)) - - # Deshacer cambios - set_setting("adult_mode", settings_pre.get("adult_mode", 0)) - set_setting("adult_request_password", settings_pre.get("adult_request_password", True)) - - # Borramos settings auxiliares - set_setting('adult_aux_intro_password', '') - 
set_setting('adult_aux_new_password1', '') - set_setting('adult_aux_new_password2', '') - - from specials import videolibrary - from platformcode import xbmc_videolibrary - if settings_pre.get('downloadpath', None) != settings_post.get('downloadpath', None): - xbmc_videolibrary.update_sources(settings_post.get('downloadpath', None), settings_pre.get('downloadpath', None)) - - # si se ha cambiado la ruta de la videoteca llamamos a comprobar directorios para que lo cree y pregunte - # automaticamente si configurar la videoteca - if settings_pre.get("videolibrarypath", None) != settings_post.get("videolibrarypath", None) or \ - settings_pre.get("folder_movies", None) != settings_post.get("folder_movies", None) or \ - settings_pre.get("folder_tvshows", None) != settings_post.get("folder_tvshows", None): - videolibrary.move_videolibrary(settings_pre.get("videolibrarypath", None), settings_post.get("videolibrarypath", None), - settings_pre.get("folder_movies", None), settings_post.get("folder_movies", None), - settings_pre.get("folder_tvshows", None), settings_post.get("folder_tvshows", None)) - - # si se ha puesto que se quiere autoconfigurar y se había creado el directorio de la videoteca - if not settings_pre.get("videolibrary_kodi", None) and settings_post.get("videolibrary_kodi", None): - xbmc_videolibrary.ask_set_content(silent=True) - elif settings_pre.get("videolibrary_kodi", None) and not settings_post.get("videolibrary_kodi", None): - strm_list = [] - strm_list.append(get_setting('videolibrarypath')) - xbmc_videolibrary.clean(strm_list) def get_setting(name, channel="", server="", default=None): @@ -310,15 +252,11 @@ def get_setting(name, channel="", server="", default=None): return False else: # special case return as str - if name in ["adult_password", "adult_aux_intro_password", "adult_aux_new_password1", - "adult_aux_new_password2"]: - return value - else: - try: - value = int(value) - except ValueError: - pass - return value + try: + value = int(value) + 
except ValueError: + pass + return value def set_setting(name, value, channel="", server=""): @@ -397,9 +335,9 @@ def get_localized_category(categ): categories = {'movie': get_localized_string(30122), 'tvshow': get_localized_string(30123), 'anime': get_localized_string(30124), 'documentary': get_localized_string(30125), 'vos': get_localized_string(30136), 'sub-ita': get_localized_string(70566), - 'adult': get_localized_string(30126), 'direct': get_localized_string(30137), - 'torrent': get_localized_string(70015), 'live': get_localized_string(30138), - 'music': get_localized_string(30139) } + 'direct': get_localized_string(30137), 'torrent': get_localized_string(70015), + 'live': get_localized_string(30138), 'music': get_localized_string(30139) + } return categories[categ] if categ in categories else categ diff --git a/platformcode/custom_code.py b/platformcode/custom_code.py index 46ce5009..0881057f 100644 --- a/platformcode/custom_code.py +++ b/platformcode/custom_code.py @@ -70,11 +70,11 @@ def init(): verify_Kodi_video_DB() #LIBTORRENT: se descarga el binario de Libtorrent cada vez que se actualiza Alfa - try: - threading.Thread(target=update_libtorrent).start() # Creamos un Thread independiente, hasta el fin de Kodi - time.sleep(2) # Dejamos terminar la inicialización... - except: # Si hay problemas de threading, nos vamos - logger.error(traceback.format_exc()) + # try: + # threading.Thread(target=update_libtorrent).start() # Creamos un Thread independiente, hasta el fin de Kodi + # time.sleep(2) # Dejamos terminar la inicialización... 
+ # except: # Si hay problemas de threading, nos vamos + # logger.error(traceback.format_exc()) # #QUASAR: Preguntamos si se hacen modificaciones a Quasar # if not filetools.exists(filetools.join(config.get_data_path(), "quasar.json")) \ @@ -245,102 +245,102 @@ def update_external_addon(addon_name): return False -def update_libtorrent(): - logger.info() +# def update_libtorrent(): +# logger.info() - if not config.get_setting("mct_buffer", server="torrent", default=""): - default = config.get_setting("torrent_client", server="torrent", default=0) - config.set_setting("torrent_client", default, server="torrent") - config.set_setting("mct_buffer", "50", server="torrent") - if config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")): - config.set_setting("mct_download_path", config.get_setting("downloadpath"), server="torrent") - config.set_setting("mct_background_download", True, server="torrent") - config.set_setting("mct_rar_unpack", True, server="torrent") - config.set_setting("bt_buffer", "50", server="torrent") - if config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")): - config.set_setting("bt_download_path", config.get_setting("downloadpath"), server="torrent") - config.set_setting("mct_download_limit", "", server="torrent") - config.set_setting("magnet2torrent", False, server="torrent") +# if not config.get_setting("mct_buffer", server="torrent", default=""): +# default = config.get_setting("torrent_client", server="torrent", default=0) +# config.set_setting("torrent_client", default, server="torrent") +# config.set_setting("mct_buffer", "50", server="torrent") +# if config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")): +# config.set_setting("mct_download_path", config.get_setting("downloadpath"), server="torrent") +# config.set_setting("mct_background_download", True, server="torrent") +# 
config.set_setting("mct_rar_unpack", True, server="torrent") +# config.set_setting("bt_buffer", "50", server="torrent") +# if config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")): +# config.set_setting("bt_download_path", config.get_setting("downloadpath"), server="torrent") +# config.set_setting("mct_download_limit", "", server="torrent") +# config.set_setting("magnet2torrent", False, server="torrent") - if not filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) or not \ - config.get_setting("unrar_path", server="torrent", default=""): +# if not filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) or not \ +# config.get_setting("unrar_path", server="torrent", default=""): - path = filetools.join(config.get_runtime_path(), 'lib', 'rarfiles') - creationflags = '' - sufix = '' - unrar = '' - for device in filetools.listdir(path): - if xbmc.getCondVisibility("system.platform.android") and 'android' not in device: continue - if xbmc.getCondVisibility("system.platform.windows") and 'windows' not in device: continue - if not xbmc.getCondVisibility("system.platform.windows") and not xbmc.getCondVisibility("system.platform.android") \ - and ('android' in device or 'windows' in device): continue - if 'windows' in device: - creationflags = 0x08000000 - sufix = '.exe' - else: - creationflags = '' - sufix = '' - unrar = filetools.join(path, device, 'unrar%s') % sufix - if not filetools.exists(unrar): unrar = '' - if unrar: - if not xbmc.getCondVisibility("system.platform.windows"): - try: - if xbmc.getCondVisibility("system.platform.android"): - # Para Android copiamos el binario a la partición del sistema - unrar_org = unrar - unrar = filetools.join(xbmc.translatePath('special://xbmc/'), 'files').replace('/cache/apk/assets', '') - if not filetools.exists(unrar): - filetools.mkdir(unrar) - unrar = filetools.join(unrar, 'unrar') - filetools.copy(unrar_org, unrar, silent=True) 
+# path = filetools.join(config.get_runtime_path(), 'lib', 'rarfiles') +# creationflags = '' +# sufix = '' +# unrar = '' +# for device in filetools.listdir(path): +# if xbmc.getCondVisibility("system.platform.android") and 'android' not in device: continue +# if xbmc.getCondVisibility("system.platform.windows") and 'windows' not in device: continue +# if not xbmc.getCondVisibility("system.platform.windows") and not xbmc.getCondVisibility("system.platform.android") \ +# and ('android' in device or 'windows' in device): continue +# if 'windows' in device: +# creationflags = 0x08000000 +# sufix = '.exe' +# else: +# creationflags = '' +# sufix = '' +# unrar = filetools.join(path, device, 'unrar%s') % sufix +# if not filetools.exists(unrar): unrar = '' +# if unrar: +# if not xbmc.getCondVisibility("system.platform.windows"): +# try: +# if xbmc.getCondVisibility("system.platform.android"): +# # Para Android copiamos el binario a la partición del sistema +# unrar_org = unrar +# unrar = filetools.join(xbmc.translatePath('special://xbmc/'), 'files').replace('/cache/apk/assets', '') +# if not filetools.exists(unrar): +# filetools.mkdir(unrar) +# unrar = filetools.join(unrar, 'unrar') +# filetools.copy(unrar_org, unrar, silent=True) - command = ['chmod', '777', '%s' % unrar] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - command = ['ls', '-l', unrar] - p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - xbmc.log('######## UnRAR file: %s' % str(output_cmd), xbmc.LOGNOTICE) - except: - xbmc.log('######## UnRAR ERROR in path: %s' % str(unrar), xbmc.LOGNOTICE) - logger.error(traceback.format_exc(1)) +# command = ['chmod', '777', '%s' % unrar] +# p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +# output_cmd, error_cmd = p.communicate() +# command = ['ls', '-l', unrar] +# p = subprocess.Popen(command, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) +# output_cmd, error_cmd = p.communicate() +# xbmc.log('######## UnRAR file: %s' % str(output_cmd), xbmc.LOGNOTICE) +# except: +# xbmc.log('######## UnRAR ERROR in path: %s' % str(unrar), xbmc.LOGNOTICE) +# logger.error(traceback.format_exc(1)) - try: - if xbmc.getCondVisibility("system.platform.windows"): - p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=creationflags) - else: - p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output_cmd, error_cmd = p.communicate() - if p.returncode != 0 or error_cmd: - xbmc.log('######## UnRAR returncode in module %s: %s, %s in %s' % \ - (device, str(p.returncode), str(error_cmd), unrar), xbmc.LOGNOTICE) - unrar = '' - else: - xbmc.log('######## UnRAR OK in %s: %s' % (device, unrar), xbmc.LOGNOTICE) - break - except: - xbmc.log('######## UnRAR ERROR in module %s: %s' % (device, unrar), xbmc.LOGNOTICE) - logger.error(traceback.format_exc(1)) - unrar = '' +# try: +# if xbmc.getCondVisibility("system.platform.windows"): +# p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=creationflags) +# else: +# p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +# output_cmd, error_cmd = p.communicate() +# if p.returncode != 0 or error_cmd: +# xbmc.log('######## UnRAR returncode in module %s: %s, %s in %s' % \ +# (device, str(p.returncode), str(error_cmd), unrar), xbmc.LOGNOTICE) +# unrar = '' +# else: +# xbmc.log('######## UnRAR OK in %s: %s' % (device, unrar), xbmc.LOGNOTICE) +# break +# except: +# xbmc.log('######## UnRAR ERROR in module %s: %s' % (device, unrar), xbmc.LOGNOTICE) +# logger.error(traceback.format_exc(1)) +# unrar = '' - if unrar: config.set_setting("unrar_path", unrar, server="torrent") +# if unrar: config.set_setting("unrar_path", unrar, server="torrent") - if filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) and \ 
- config.get_setting("libtorrent_path", server="torrent", default="") : - return +# if filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) and \ +# config.get_setting("libtorrent_path", server="torrent", default="") : +# return - try: - from lib.python_libtorrent.python_libtorrent import get_libtorrent - except Exception as e: - logger.error(traceback.format_exc(1)) - if not PY3: - e = unicode(str(e), "utf8", errors="replace").encode("utf8") - config.set_setting("libtorrent_path", "", server="torrent") - if not config.get_setting("libtorrent_error", server="torrent", default=''): - config.set_setting("libtorrent_error", str(e), server="torrent") +# try: +# from lib.python_libtorrent.python_libtorrent import get_libtorrent +# except Exception as e: +# logger.error(traceback.format_exc(1)) +# if not PY3: +# e = unicode(str(e), "utf8", errors="replace").encode("utf8") +# config.set_setting("libtorrent_path", "", server="torrent") +# if not config.get_setting("libtorrent_error", server="torrent", default=''): +# config.set_setting("libtorrent_error", str(e), server="torrent") - return +# return def verify_Kodi_video_DB(): diff --git a/platformcode/envtal.py b/platformcode/envtal.py index f413d281..c68342e8 100644 --- a/platformcode/envtal.py +++ b/platformcode/envtal.py @@ -4,7 +4,6 @@ # ------------------------------------------------------------ from __future__ import division -# from builtins import str from past.utils import old_div import sys @@ -31,8 +30,8 @@ from platformcode import logger, config, platformtools def get_environment(): """ - Devuelve las variables de entorno del OS, de Kodi y de Alfa más habituales, - necesarias para el diagnóstico de fallos + Returns the most common OS, Kodi and Alpha environment variables, +    necessary for fault diagnosis """ try: @@ -56,9 +55,9 @@ def get_environment(): try: for label_a in subprocess.check_output('getprop').split('\n'): if 'build.version.release' in label_a: - 
environment['os_release'] = str(scrapertools.find_single_match(label_a, ':\s*\[(.*?)\]$')) + environment['os_release'] = str(scrapertools.find_single_match(label_a, r':\s*\[(.*?)\]$')) if 'product.model' in label_a: - environment['prod_model'] = str(scrapertools.find_single_match(label_a, ':\s*\[(.*?)\]$')) + environment['prod_model'] = str(scrapertools.find_single_match(label_a, r':\s*\[(.*?)\]$')) except: try: for label_a in filetools.read(os.environ['ANDROID_ROOT'] + '/build.prop').split(): @@ -196,87 +195,87 @@ def get_environment(): except: environment['videolab_free'] = '?' - environment['torrent_list'] = [] - environment['torrentcli_option'] = '' - environment['torrent_error'] = '' - environment['torrentcli_rar'] = config.get_setting("mct_rar_unpack", server="torrent", default=True) - environment['torrentcli_backgr'] = config.get_setting("mct_background_download", server="torrent", default=True) - environment['torrentcli_lib_path'] = config.get_setting("libtorrent_path", server="torrent", default="") - if environment['torrentcli_lib_path']: - lib_path = 'Activo' - else: - lib_path = 'Inactivo' - environment['torrentcli_unrar'] = config.get_setting("unrar_path", server="torrent", default="") - if environment['torrentcli_unrar']: - if xbmc.getCondVisibility("system.platform.Android"): - unrar = 'Android' - else: - unrar, bin = filetools.split(environment['torrentcli_unrar']) - unrar = unrar.replace('\\', '/') - if not unrar.endswith('/'): - unrar = unrar + '/' - unrar = scrapertools.find_single_match(unrar, '\/([^\/]+)\/$').capitalize() - else: - unrar = 'Inactivo' - torrent_id = config.get_setting("torrent_client", server="torrent", default=0) - environment['torrentcli_option'] = str(torrent_id) - torrent_options = platformtools.torrent_client_installed() - if lib_path == 'Activo': - torrent_options = ['MCT'] + torrent_options - torrent_options = ['BT'] + torrent_options - environment['torrent_list'].append({'Torrent_opt': str(torrent_id), 'Libtorrent': 
lib_path, \ - 'RAR_Auto': str(environment['torrentcli_rar']), \ - 'RAR_backgr': str(environment['torrentcli_backgr']), \ - 'UnRAR': unrar}) - environment['torrent_error'] = config.get_setting("libtorrent_error", server="torrent", default="") - if environment['torrent_error']: - environment['torrent_list'].append({'Libtorrent_error': environment['torrent_error']}) + # environment['torrent_list'] = [] + # environment['torrentcli_option'] = '' + # environment['torrent_error'] = '' + # environment['torrentcli_rar'] = config.get_setting("mct_rar_unpack", server="torrent", default=True) + # environment['torrentcli_backgr'] = config.get_setting("mct_background_download", server="torrent", default=True) + # environment['torrentcli_lib_path'] = config.get_setting("libtorrent_path", server="torrent", default="") + # if environment['torrentcli_lib_path']: + # lib_path = 'Activo' + # else: + # lib_path = 'Inactivo' + # environment['torrentcli_unrar'] = config.get_setting("unrar_path", server="torrent", default="") + # if environment['torrentcli_unrar']: + # if xbmc.getCondVisibility("system.platform.Android"): + # unrar = 'Android' + # else: + # unrar, bin = filetools.split(environment['torrentcli_unrar']) + # unrar = unrar.replace('\\', '/') + # if not unrar.endswith('/'): + # unrar = unrar + '/' + # unrar = scrapertools.find_single_match(unrar, '\/([^\/]+)\/$').capitalize() + # else: + # unrar = 'Inactivo' + # torrent_id = config.get_setting("torrent_client", server="torrent", default=0) + # environment['torrentcli_option'] = str(torrent_id) + # torrent_options = platformtools.torrent_client_installed() + # if lib_path == 'Activo': + # torrent_options = ['MCT'] + torrent_options + # torrent_options = ['BT'] + torrent_options + # environment['torrent_list'].append({'Torrent_opt': str(torrent_id), 'Libtorrent': lib_path, \ + # 'RAR_Auto': str(environment['torrentcli_rar']), \ + # 'RAR_backgr': str(environment['torrentcli_backgr']), \ + # 'UnRAR': unrar}) + # 
environment['torrent_error'] = config.get_setting("libtorrent_error", server="torrent", default="") + # if environment['torrent_error']: + # environment['torrent_list'].append({'Libtorrent_error': environment['torrent_error']}) - for torrent_option in torrent_options: - cliente = dict() - cliente['D_load_Path'] = '' - cliente['Libre'] = '?' - cliente['Plug_in'] = torrent_option.replace('Plugin externo: ', '') - if cliente['Plug_in'] == 'BT': - cliente['D_load_Path'] = str(config.get_setting("bt_download_path", server="torrent", default='')) - if not cliente['D_load_Path']: continue - cliente['Buffer'] = str(config.get_setting("bt_buffer", server="torrent", default=50)) - elif cliente['Plug_in'] == 'MCT': - cliente['D_load_Path'] = str(config.get_setting("mct_download_path", server="torrent", default='')) - if not cliente['D_load_Path']: continue - cliente['Buffer'] = str(config.get_setting("mct_buffer", server="torrent", default=50)) - elif xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % cliente['Plug_in']): - __settings__ = xbmcaddon.Addon(id="plugin.video.%s" % cliente['Plug_in']) - cliente['Plug_in'] = cliente['Plug_in'].capitalize() - if cliente['Plug_in'] == 'Torrenter': - cliente['D_load_Path'] = str(xbmc.translatePath(__settings__.getSetting('storage'))) - if not cliente['D_load_Path']: - cliente['D_load_Path'] = str(filetools.join(xbmc.translatePath("special://home/"), \ - "cache", "xbmcup", "plugin.video.torrenter", - "Torrenter")) - cliente['Buffer'] = str(__settings__.getSetting('pre_buffer_bytes')) - else: - cliente['D_load_Path'] = str(xbmc.translatePath(__settings__.getSetting('download_path'))) - cliente['Buffer'] = str(__settings__.getSetting('buffer_size')) - if __settings__.getSetting('download_storage') == '1' and __settings__.getSetting('memory_size'): - cliente['Memoria'] = str(__settings__.getSetting('memory_size')) + # for torrent_option in torrent_options: + # cliente = dict() + # cliente['D_load_Path'] = '' + # 
cliente['Libre'] = '?' + # cliente['Plug_in'] = torrent_option.replace('Plugin externo: ', '') + # if cliente['Plug_in'] == 'BT': + # cliente['D_load_Path'] = str(config.get_setting("bt_download_path", server="torrent", default='')) + # if not cliente['D_load_Path']: continue + # cliente['Buffer'] = str(config.get_setting("bt_buffer", server="torrent", default=50)) + # elif cliente['Plug_in'] == 'MCT': + # cliente['D_load_Path'] = str(config.get_setting("mct_download_path", server="torrent", default='')) + # if not cliente['D_load_Path']: continue + # cliente['Buffer'] = str(config.get_setting("mct_buffer", server="torrent", default=50)) + # elif xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % cliente['Plug_in']): + # __settings__ = xbmcaddon.Addon(id="plugin.video.%s" % cliente['Plug_in']) + # cliente['Plug_in'] = cliente['Plug_in'].capitalize() + # if cliente['Plug_in'] == 'Torrenter': + # cliente['D_load_Path'] = str(xbmc.translatePath(__settings__.getSetting('storage'))) + # if not cliente['D_load_Path']: + # cliente['D_load_Path'] = str(filetools.join(xbmc.translatePath("special://home/"), \ + # "cache", "xbmcup", "plugin.video.torrenter", + # "Torrenter")) + # cliente['Buffer'] = str(__settings__.getSetting('pre_buffer_bytes')) + # else: + # cliente['D_load_Path'] = str(xbmc.translatePath(__settings__.getSetting('download_path'))) + # cliente['Buffer'] = str(__settings__.getSetting('buffer_size')) + # if __settings__.getSetting('download_storage') == '1' and __settings__.getSetting('memory_size'): + # cliente['Memoria'] = str(__settings__.getSetting('memory_size')) - if cliente['D_load_Path']: - try: - if environment['os_name'].lower() == 'windows': - free_bytes = ctypes.c_ulonglong(0) - ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(cliente['D_load_Path']), - None, None, ctypes.pointer(free_bytes)) - cliente['Libre'] = str(round(float(free_bytes.value) / \ - (1024 ** 3), 3)).replace('.', ',') - else: - disk_space = 
os.statvfs(cliente['D_load_Path']) - if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize - cliente['Libre'] = str(round((float(disk_space.f_bavail) / \ - (1024 ** 3)) * float(disk_space.f_frsize), 3)).replace('.', ',') - except: - pass - environment['torrent_list'].append(cliente) + # if cliente['D_load_Path']: + # try: + # if environment['os_name'].lower() == 'windows': + # free_bytes = ctypes.c_ulonglong(0) + # ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(cliente['D_load_Path']), + # None, None, ctypes.pointer(free_bytes)) + # cliente['Libre'] = str(round(float(free_bytes.value) / \ + # (1024 ** 3), 3)).replace('.', ',') + # else: + # disk_space = os.statvfs(cliente['D_load_Path']) + # if not disk_space.f_frsize: disk_space.f_frsize = disk_space.f_frsize.f_bsize + # cliente['Libre'] = str(round((float(disk_space.f_bavail) / \ + # (1024 ** 3)) * float(disk_space.f_frsize), 3)).replace('.', ',') + # except: + # pass + # environment['torrent_list'].append(cliente) environment['proxy_active'] = '' try: @@ -359,16 +358,16 @@ def get_environment(): def list_env(environment={}): + sep = '-----------------------------------------------------------' if not environment: environment = get_environment() if environment['debug'] == 'False': logger.log_enable(True) - logger.info('----------------------------------------------') - logger.info('Variables de entorno Alfa: ' + environment['addon_version'] + - ' Debug: ' + environment['debug']) - logger.info("----------------------------------------------") + logger.info(sep) + logger.info('KoD environment variables: ' + environment['addon_version'] + ' Debug: ' + environment['debug']) + logger.info(sep) logger.info(environment['os_name'] + ' ' + environment['prod_model'] + ' ' + environment['os_release'] + ' ' + environment['machine'] + ' ' + @@ -381,45 +380,45 @@ def list_env(environment={}): logger.info('CPU: ' + environment['cpu_usage']) if environment['mem_total'] or 
environment['mem_free']: - logger.info('Memoria: Total: ' + environment['mem_total'] + ' MB / Disp.: ' + - environment['mem_free'] + ' MB / Buffers: ' + - str(int(environment['kodi_buffer']) * 3) + ' MB / Buffermode: ' + - environment['kodi_bmode'] + ' / Readfactor: ' + + logger.info('Memory: Total: ' + environment['mem_total'] + ' MB | Disp.: ' + + environment['mem_free'] + ' MB | Buffers: ' + + str(int(environment['kodi_buffer']) * 3) + ' MB | Buffermode: ' + + environment['kodi_bmode'] + ' | Readfactor: ' + environment['kodi_rfactor']) - logger.info('Userdata: ' + environment['userdata_path'] + ' - Libre: ' + + logger.info('Userdata: ' + environment['userdata_path'] + ' - Free: ' + environment['userdata_free'].replace('.', ',') + ' GB') - logger.info('Videoteca: Series/Epis: ' + environment['videolab_series'] + '/' + + logger.info('Videolibrary: Series/Episodes: ' + environment['videolab_series'] + '/' + environment['videolab_episodios'] + ' - Pelis: ' + environment['videolab_pelis'] + ' - Upd: ' + environment['videolab_update'] + ' - Path: ' + - environment['videolab_path'] + ' - Libre: ' + + environment['videolab_path'] + ' - Free: ' + environment['videolab_free'].replace('.', ',') + ' GB') - if environment['torrent_list']: - for x, cliente in enumerate(environment['torrent_list']): - if x == 0: - cliente_alt = cliente.copy() - del cliente_alt['Torrent_opt'] - logger.info('Torrent: Opt: %s, %s' % (str(cliente['Torrent_opt']), \ - str(cliente_alt).replace('{', '').replace('}', '') \ - .replace("'", '').replace('_', ' '))) - elif x == 1 and environment['torrent_error']: - logger.info('- ' + str(cliente).replace('{', '').replace('}', '') \ - .replace("'", '').replace('_', ' ')) - else: - cliente_alt = cliente.copy() - del cliente_alt['Plug_in'] - cliente_alt['Libre'] = cliente_alt['Libre'].replace('.', ',') + ' GB' - logger.info('- %s: %s' % (str(cliente['Plug_in']), str(cliente_alt) \ - .replace('{', '').replace('}', '').replace("'", '') \ - .replace('\\\\', 
'\\'))) + # if environment['torrent_list']: + # for x, cliente in enumerate(environment['torrent_list']): + # if x == 0: + # cliente_alt = cliente.copy() + # del cliente_alt['Torrent_opt'] + # logger.info('Torrent: Opt: %s, %s' % (str(cliente['Torrent_opt']), \ + # str(cliente_alt).replace('{', '').replace('}', '') \ + # .replace("'", '').replace('_', ' '))) + # elif x == 1 and environment['torrent_error']: + # logger.info('- ' + str(cliente).replace('{', '').replace('}', '') \ + # .replace("'", '').replace('_', ' ')) + # else: + # cliente_alt = cliente.copy() + # del cliente_alt['Plug_in'] + # cliente_alt['Libre'] = cliente_alt['Libre'].replace('.', ',') + ' GB' + # logger.info('- %s: %s' % (str(cliente['Plug_in']), str(cliente_alt) \ + # .replace('{', '').replace('}', '').replace("'", '') \ + # .replace('\\\\', '\\'))) - logger.info('Proxy: ' + environment['proxy_active']) + # logger.info('Proxy: ' + environment['proxy_active']) - logger.info('TAMAÑO del LOG: ' + environment['log_size'].replace('.', ',') + ' MB') - logger.info("----------------------------------------------") + logger.info('LOG Size: ' + environment['log_size'].replace('.', ',') + ' MB') + logger.info(sep) if environment['debug'] == 'False': logger.log_enable(False) diff --git a/platformcode/keymaptools.py b/platformcode/keymaptools.py index 3f2ae5eb..bc3be98e 100644 --- a/platformcode/keymaptools.py +++ b/platformcode/keymaptools.py @@ -1,18 +1,14 @@ # -*- coding: utf-8 -*- from builtins import map -#from builtins import str -import sys +import sys, xbmc, xbmcaddon, xbmcgui, base64, json PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int from threading import Timer -import xbmc -import xbmcaddon -import xbmcgui - from channelselector import get_thumb from platformcode import config, logger +import channelselector class KeyListener(xbmcgui.WindowXMLDialog): @@ -66,23 +62,11 @@ def set_key(): from platformcode import platformtools import xbmc file_xml = 
"special://profile/keymaps/kod.xml" - data = '<keymap><global><keyboard><key id="%s">' % new_key + 'runplugin(plugin://' \ - 'plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIiwNCiAgICAib3BlbiI6IHRydWUNCn0=)</key></keyboard></global></keymap>' + data = '<keymap><global><keyboard><key id="%s">' % new_key + 'runplugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIiwNCiAgICAib3BlbiI6IHRydWUNCn0=)</key></keyboard></global></keymap>' filetools.write(xbmc.translatePath(file_xml), data) platformtools.dialog_notification(config.get_localized_string(70700),config.get_localized_string(70702)) config.set_setting("shortcut_key", new_key) - # file_idioma = filetools.join(config.get_runtime_path(), 'resources', 'language', 'Spanish', 'strings.xml') - # data = filetools.read(file_idioma) - # value_xml = scrapertools.find_single_match(data, '<string id="31100">([^<]+)<') - # if "tecla" in value_xml: - # data = data.replace(value_xml, 'Cambiar tecla/botón para abrir la ventana (Guardada: %s)' % new_key) - # elif "key" in value_xml: - # data = data.replace(value_xml, 'Change key/button to open the window (Saved: %s)' % new_key) - # else: - # data = data.replace(value_xml, - # 'Cambiamento di chiave/pulsante per aprire la finestra (Salvato: %s)' % new_key) - # filetools.write(file_idioma, data) return @@ -98,16 +82,6 @@ def delete_key(): config.set_setting("shortcut_key", '') -MAIN_MENU = { - "news": {"label": config.get_localized_string(30130), "icon": get_thumb("news.png"), "order": 0}, - "channels": {"label": config.get_localized_string(30118), "icon": get_thumb("channels.png"), "order": 1}, - "search": {"label": config.get_localized_string(70082), "icon": get_thumb("search.png"), "order": 2}, - "favorites": {"label": config.get_localized_string(30102), "icon": get_thumb("favorites.png"), "order": 3}, - "videolibrary": {"label": config.get_localized_string(30131), "icon": get_thumb("videolibrary.png"), "order": 4}, - "downloads": {"label": 
config.get_localized_string(60332), "icon": get_thumb("downloads.png"), "order": 5}, - "settings": {"label": config.get_localized_string(60333), "icon": get_thumb("setting_0.png"), "order": 6} -} - class Main(xbmcgui.WindowXMLDialog): def __init__(self, *args, **kwargs): @@ -118,61 +92,71 @@ class Main(xbmcgui.WindowXMLDialog): if config.get_platform(True)['num_version'] < 18: self.setCoordinateResolution(2) - for menuentry in list(MAIN_MENU.keys()): - item = xbmcgui.ListItem(MAIN_MENU[menuentry]["label"]) - item.setProperty("thumb", str(MAIN_MENU[menuentry]["icon"])) - item.setProperty("identifier", str(menuentry)) - item.setProperty("order", str(MAIN_MENU[menuentry]["order"])) - self.items.append(item) + for menuentry in menu: + if not menuentry.channel: menuentry.channel = prevchannel + item = xbmcgui.ListItem(menuentry.title) + if not submenu and menuentry.channel in ['news', 'channelselector', 'search', 'videolibrary']: + item.setProperty('sub', 'Controls/spinUp-Focus.png') + if menuentry.title != 'Redirect': + for key, value in json.loads(menuentry.tojson()).items(): + item.setProperty(key, str(value)) + item.setProperty('run', menuentry.tojson()) + self.items.append(item) - self.items.sort(key=lambda it: it.getProperty("order")) self.getControl(32500).addItems(self.items) self.setFocusId(32500) def onClick(self, control_id): if control_id == 32500: - identifier = self.getControl(32500).getSelectedItem().getProperty("identifier") - if identifier == "news": - xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJuZXdzIg0KfQ==")') - elif identifier == "channels": - xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAiZ2V0Y2hhbm5lbHR5cGVzIiwgDQogICAgImNoYW5uZWwiOiAiY2hhbm5lbHNlbGVjdG9yIg0KfQ==")') - elif identifier == "search": - 
xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJzZWFyY2giDQp9")') - elif identifier == "favorites": - xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJmYXZvcml0ZXMiDQp9")') - elif identifier == "videolibrary": - xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJ2aWRlb2xpYnJhcnkiDQp9")') - elif identifier == "downloads": - xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJkb3dubG9hZHMiDQp9")') - elif identifier == "settings": - xbmc.executebuiltin('Dialog.Close(all,true)') - xbmc.executebuiltin( - 'ActivateWindow(10025, "plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAibWFpbmxpc3QiLCANCiAgICAiY2hhbm5lbCI6ICJzZXR0aW5nIg0KfQ==")') + action = self.getControl(32500).getSelectedItem().getProperty('run') + self.close() + xbmc.executebuiltin('ActivateWindow(10025, "plugin://plugin.video.kod/?' 
+ base64.b64encode(action) + '")') + def onAction(self, action): # exit if action.getId() in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]: - # main.close() - xbmc.executebuiltin('Dialog.Close(all,true)') + self.close() + if submenu: open_shortcut_menu() if action.getId() == xbmcgui.ACTION_CONTEXT_MENU: config.open_settings() + focus = self.getFocusId() -def open_shortcut_menu(): + if action == 3: + if focus == 61: + self.setFocusId(32500) + elif submenu: + self.close() + open_shortcut_menu() + elif self.getControl(32500).getSelectedItem().getProperty('channel') in ['news', 'channelselector', 'search', 'videolibrary']: + channel_name = self.getControl(32500).getSelectedItem().getProperty('channel') + if channel_name == 'channelselector': + import channelselector + self.close() + open_shortcut_menu(channelselector.getchanneltypes(), channel_name) + else: + from core.item import Item + channel = __import__('specials.%s' % channel_name, fromlist=["specials.%s" % channel_name]) + self.close() + open_shortcut_menu(channel.mainlist(Item()), channel_name) + + + +def open_shortcut_menu(newmenu='', channel=''): + xbmc.executebuiltin('Dialog.Close(all,true)') + global menu + global submenu + global prevchannel + prevchannel = channel + if newmenu: + menu = newmenu + submenu = True + else: + menu = channelselector.getmainlist() + submenu = False XML = 'ShortCutMenu.xml' if config.get_setting('icon_set') == 'dark': XML = 'Dark' + XML diff --git a/platformcode/launcher.py b/platformcode/launcher.py index d9ab9f8b..c6a80bb6 100644 --- a/platformcode/launcher.py +++ b/platformcode/launcher.py @@ -10,13 +10,12 @@ import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int -if PY3: - import urllib.error as urllib2 # Es muy lento en PY2. En PY3 es nativo -else: - import urllib2 # Usamos el nativo de PY2 que es más rápido +# if PY3: +# import urllib.error as urllib2 # Es muy lento en PY2. 
En PY3 es nativo +# else: +# import urllib2 # Usamos el nativo de PY2 que es más rápido import os -import sys from core.item import Item from platformcode import config, logger @@ -163,17 +162,6 @@ def run(item=None): config.get_localized_string(70740) % short) # Action in certain channel specified in "action" and "channel" parameters else: - # Entry point for a channel is the "mainlist" action, so here we check parental control - if item.action == "mainlist": - from core import channeltools - #updater.checkforupdates() beta version checking for update, still disabled - - # Parental control - # If it is an adult channel, and user has configured pin, asks for it - if channeltools.is_adult(item.channel) and config.get_setting("adult_request_password"): - tecleado = platformtools.dialog_input("", config.get_localized_string(60334), True) - if tecleado is None or tecleado != config.get_setting("adult_password"): - return # # Actualiza el canal individual # if (item.action == "mainlist" and item.channel != "channelselector" and # config.get_setting("check_for_channel_updates") == True): @@ -183,16 +171,10 @@ def run(item=None): # Checks if channel exists if os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")): CHANNELS = 'channels' - elif os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', 'porn', item.channel + ".py")): - CHANNELS = 'channels.porn' else: CHANNELS = 'specials' - if CHANNELS != 'channels.porn': - channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py") - else: - channel_file = os.path.join(config.get_runtime_path(), 'channels', 'porn', - item.channel + ".py") + channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py") logger.info("channel_file= " + channel_file + ' - ' + CHANNELS + ' - ' + item.channel) @@ -329,21 +311,21 @@ def run(item=None): platformtools.render_items(itemlist, item) - except urllib2.URLError as e: - import traceback - 
logger.error(traceback.format_exc()) - - # Grab inner and third party errors - if hasattr(e, 'reason'): - logger.error("Reason for the error, code: %s | Reason: %s" % (str(e.reason[0]), str(e.reason[1]))) - texto = config.get_localized_string(30050) # "No se puede conectar con el sitio web" - platformtools.dialog_ok(config.get_localized_string(20000), texto) - - # Grab server response errors - elif hasattr(e, 'code'): - logger.error("HTTP error code: %d" % e.code) - # "El sitio web no funciona correctamente (error http %d)" - platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(30051) % e.code) + # except urllib2.URLError as e: + # import traceback + # logger.error(traceback.format_exc()) + # + # # Grab inner and third party errors + # if hasattr(e, 'reason'): + # logger.error("Reason for the error, code: %s | Reason: %s" % (str(e.reason[0]), str(e.reason[1]))) + # texto = config.get_localized_string(30050) # "No se puede conectar con el sitio web" + # platformtools.dialog_ok(config.get_localized_string(20000), texto) + # + # # Grab server response errors + # elif hasattr(e, 'code'): + # logger.error("HTTP error code: %d" % e.code) + # # "El sitio web no funciona correctamente (error http %d)" + # platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(30051) % e.code) except WebErrorException as e: import traceback from core import scrapertools diff --git a/platformcode/platformtools.py b/platformcode/platformtools.py index 0a187aa0..d9f85ae9 100644 --- a/platformcode/platformtools.py +++ b/platformcode/platformtools.py @@ -2,43 +2,31 @@ # ------------------------------------------------------------ # platformtools # ------------------------------------------------------------ -# Herramientas responsables de adaptar los diferentes -# cuadros de dialogo a una plataforma en concreto, -# en este caso Kodi. +# Tools responsible for adapting the different dialog boxes to a specific platform. 
# version 2.0 # ------------------------------------------------------------ from __future__ import division from __future__ import absolute_import from past.utils import old_div -#from builtins import str import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int if PY3: - #from future import standard_library - #standard_library.install_aliases() - import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo + import urllib.parse as urllib else: - import urllib # Usamos el nativo de PY2 que es más rápido + import urllib -import os - -import xbmc -import xbmcgui -import xbmcplugin -import xbmcaddon +import os, xbmc, xbmcgui, xbmcplugin from channelselector import get_thumb from core import channeltools from core import trakt_tools, scrapertools from core.item import Item -from platformcode import logger -from platformcode import config -from platformcode import unify +from platformcode import logger, config, unify -addon = xbmcaddon.Addon('plugin.video.kod') +addon = config.__settings__ addon_icon = os.path.join( addon.getAddonInfo( "path" ), "logo.png" ) class XBMCPlayer(xbmc.Player): @@ -86,8 +74,8 @@ def dialog_yesno(heading, line1, line2="", line3="", nolabel="No", yeslabel="Si" return dialog.yesno(heading, makeMessage(line1, line2, line3), nolabel=nolabel, yeslabel=yeslabel) -def dialog_select(heading, _list): - return xbmcgui.Dialog().select(heading, _list) +def dialog_select(heading, _list, preselect=0): + return xbmcgui.Dialog().select(heading, _list, preselect=preselect) def dialog_multiselect(heading, _list, autoclose=0, preselect=[], useDetails=False): @@ -124,7 +112,7 @@ def dialog_numeric(_type, heading, default=""): return d -def dialog_textviewer(heading, text): # disponible a partir de kodi 16 +def dialog_textviewer(heading, text): # available from kodi 16 return xbmcgui.Dialog().textviewer(heading, text) @@ -135,8 +123,16 @@ def dialog_browse(_type, heading, default=""): def itemlist_refresh(): 
+ pos = Item().fromurl(xbmc.getInfoLabel('ListItem.FileNameAndPath')).itemlistPosition + logger.info('Current position: ' + str(pos)) xbmc.executebuiltin("Container.Refresh") + while Item().fromurl(xbmc.getInfoLabel('ListItem.FileNameAndPath')).itemlistPosition != pos: + win = xbmcgui.Window(xbmcgui.getCurrentWindowId()) + cid = win.getFocusId() + ctl = win.getControl(cid) + ctl.selectItem(pos) + def itemlist_update(item, replace=False): if replace: # reset the path history @@ -172,14 +168,15 @@ def render_items(itemlist, parent_item): itemlist.append(Item(title=config.get_localized_string(60347), thumbnail=get_thumb('nofolder.png'))) dirItems = [] - for item in itemlist: + for n, item in enumerate(itemlist): + item.itemlistPosition = n + 1 item_url = item.tourl() if item.category == "": item.category = parent_item.category if not item.title: item.title = '' - # Si no hay action o es findvideos/play, folder=False porque no se va a devolver ningún listado + # If there is no action or it is findvideos / play, folder = False because no listing will be returned if item.action in ['play', '']: item.folder = False if item.fanart == "": @@ -187,11 +184,15 @@ def render_items(itemlist, parent_item): if item.action == 'play' and thumb_type == 1 and not item.forcethumb: item.thumbnail = "https://github.com/kodiondemand/media/raw/master/resources/servers/" + item.server.lower() + '.png' - # if cloudflare, cookies are needed to display images taken from site + # if cloudflare and cloudscraper is used, cookies are needed to display images taken from site # before checking domain (time consuming), checking if tmdb failed (so, images scraped from website are used) - if item.action in ['findvideos'] and not item.infoLabels['tmdb_id'] and item.channel in httptools.channelsCF: - item.thumbnail = httptools.get_url_headers(item.thumbnail) - item.fanart = httptools.get_url_headers(item.fanart) + if item.action in ['findvideos'] and not item.infoLabels['tmdb_id']: + # faster but ugly 
way of checking + for d in httptools.FORCE_CLOUDSCRAPER_LIST: + if d + '/' in item.url: + item.thumbnail = httptools.get_url_headers(item.thumbnail) + item.fanart = httptools.get_url_headers(item.fanart) + break icon_image = "DefaultFolder.png" if item.folder else "DefaultVideo.png" listitem = xbmcgui.ListItem(item.title) @@ -205,8 +206,7 @@ def render_items(itemlist, parent_item): # context menu if parent_item.channel != 'special': - context_commands = def_context_commands + set_context_commands(item, item_url, parent_item, has_extendedinfo=has_extendedinfo, - superfavourites=superfavourites) + context_commands = def_context_commands + set_context_commands(item, item_url, parent_item, has_extendedinfo=has_extendedinfo, superfavourites=superfavourites) else: context_commands = def_context_commands listitem.addContextMenuItems(context_commands) @@ -233,316 +233,326 @@ def render_items(itemlist, parent_item): logger.info('END render_items') -def set_view_mode(item, parent_item): - def mode(content, Type): - mode = int(config.get_setting('view_mode_%s' % content).split(',')[-1]) - if mode == 0: - logger.info('default mode') - mode = 55 - elif not Type: - Type = 'addons' - xbmcplugin.setContent(handle=int(sys.argv[1]), content=Type) - xbmc.executebuiltin('Container.SetViewMode(%s)' % mode) - logger.info('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content) +def getCurrentView(item=None, parent_item=None): + if not parent_item: + info = xbmc.getInfoLabel('Container.FolderPath') + if not info: + return None, None + parent_item = Item().fromurl(info) + if not item: + info = xbmc.getInfoLabel('Container.ListItem(1).FileNameAndPath') + if not info: + return None, None + item = Item().fromurl(info) if info else Item() + parent_actions = ['peliculas', 'novedades', 'search', 'get_from_temp', 'newest', 'discover_list', 'new_search', 'channel_search'] + if parent_item.action == 'findvideos' or (parent_item.action in ['channel_search', 'new_search'] and 
parent_item.infoLabels['tmdb_id']): + return 'server', 'addons' if config.get_setting('touch_view') else '' + + elif parent_item.action == 'mainlist': + return 'channel', 'addons' if config.get_setting('touch_view') else '' + + elif (item.contentType in ['movie'] and parent_item.action in parent_actions) \ + or (item.channel in ['videolibrary'] and parent_item.action in ['list_movies']) \ + or (parent_item.channel in ['favorites'] and parent_item.action in ['mainlist']) \ + or parent_item.action in ['now_on_tv', 'now_on_misc', 'now_on_misc_film', 'mostrar_perfil']: + return 'movie', 'movies' + + elif (item.contentType in ['tvshow'] and parent_item.action in parent_actions) \ + or (item.channel in ['videolibrary'] and parent_item.action in ['list_tvshows']): + return 'tvshow', 'tvshows' + + elif parent_item.action in ['get_seasons']: + return 'season', 'tvshows' + + elif parent_item.action in ['episodios', 'get_episodes'] or item.contentType == 'episode': + return 'episode', 'tvshows' + + else: + return 'addon', 'addons' if config.get_setting('touch_view') else '' + + +def set_view_mode(item, parent_item): def reset_view_mode(): for mode in ['addon','channel','movie','tvshow','season','episode','server']: config.set_setting('skin_name', xbmc.getSkinDir()) config.set_setting('view_mode_%s' % mode, config.get_localized_string(70003) + ' , 0') - parent_actions = ['peliculas', 'novedades', 'search', 'get_from_temp', 'channel_search', 'newest', 'discover_list', 'new_search'] - if xbmc.getSkinDir() != config.get_setting('skin_name') or not config.get_setting('skin_name'): reset_view_mode() xbmcplugin.setContent(handle=int(sys.argv[1]), content='') xbmc.executebuiltin('Container.SetViewMode(%s)' % 55) - elif parent_item.action == 'findvideos': - mode('server', '') - - elif parent_item.action == 'mainlist': - mode('channel', '') - - elif (item.contentType in ['movie'] and parent_item.action in parent_actions ) \ - or (item.channel in ['videolibrary'] and parent_item.action 
in ['list_movies']) \ - or (parent_item.channel in ['favorites'] and parent_item.action in ['mainlist']) \ - or parent_item.action in ['now_on_tv', 'now_on_misc', 'now_on_misc_film', 'mostrar_perfil']: - mode('movie', 'movies') - - elif (item.contentType in ['tvshow'] and parent_item.action in parent_actions ) \ - or (item.channel in ['videolibrary'] and parent_item.action in ['list_tvshows']): - mode('tvshow', 'tvshows') - - elif parent_item.action in ['get_seasons']: - mode('season', 'tvshows') - - elif parent_item.action in ['episodios', 'get_episodes'] or item.contentType == 'episode': - mode('episode', 'tvshows') - - else: - mode('addon', '') + content, Type = getCurrentView(item, parent_item) + if content: + mode = int(config.get_setting('view_mode_%s' % content).split(',')[-1]) + if mode == 0: + logger.info('default mode') + mode = 55 + xbmcplugin.setContent(handle=int(sys.argv[1]), content=Type) + xbmc.executebuiltin('Container.SetViewMode(%s)' % mode) + logger.info('TYPE: ' + Type + ' - ' + 'CONTENT: ' + content) -def render_items_old(itemlist, parent_item): - """ - Función encargada de mostrar el itemlist en kodi, se pasa como parametros el itemlist y el item del que procede - @type itemlist: list - @param itemlist: lista de elementos a mostrar +# def render_items_old(itemlist, parent_item): +# """ +# Function responsible for displaying the itemlist in kodi, the itemlist and the item it comes from are passed as parameters +# @type itemlist: list +# @param itemlist: list of elements to show - @type parent_item: item - @param parent_item: elemento padre - """ - logger.info('INICIO render_items') - from core import httptools +# @type parent_item: item +# @param parent_item: parent element +# """ +# logger.info('START render_items') +# from core import httptools - # Si el itemlist no es un list salimos - if not isinstance(itemlist, list): - return +# # If the itemlist is not a list we leave +# if not isinstance(itemlist, list): +# return - if 
parent_item.start: - menu_icon = get_thumb('menu.png') - menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon, - title='Menu') - itemlist.insert(0, menu) +# if parent_item.start: +# menu_icon = get_thumb('menu.png') +# menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon, title='Menu') +# itemlist.insert(0, menu) - # Si no hay ningun item, mostramos un aviso - if not len(itemlist): - itemlist.append(Item(title=config.get_localized_string(60347), thumbnail=get_thumb('nofolder.png'))) +# # If there is no item, we show a notice +# if not len(itemlist): +# itemlist.append(Item(title=config.get_localized_string(60347), thumbnail=get_thumb('nofolder.png'))) - genre = False - if 'nero' in parent_item.title: - genre = True - anime = False - if 'anime' in channeltools.get_channel_parameters(parent_item.channel)['categories']: - anime = True - # try: - # force_unify = channeltools.get_channel_parameters(parent_item.channel)['force_unify'] - # except: - force_unify = False +# genre = False +# if 'nero' in parent_item.title: +# genre = True +# anime = False +# if 'anime' in channeltools.get_channel_parameters(parent_item.channel)['categories']: +# anime = True +# # try: +# # force_unify = channeltools.get_channel_parameters(parent_item.channel)['force_unify'] +# # except: +# force_unify = False - unify_enabled = False +# unify_enabled = False - has_extendedinfo = xbmc.getCondVisibility('System.HasAddon(script.extendedinfo)') +# has_extendedinfo = xbmc.getCondVisibility('System.HasAddon(script.extendedinfo)') - # Añadir SuperFavourites al menu contextual (1.0.53 o superior necesario) - sf_file_path = xbmc.translatePath("special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py") - check_sf = os.path.exists(sf_file_path) - superfavourites = check_sf and xbmc.getCondVisibility('System.HasAddon("plugin.program.super.favourites")') - # try: - # if 
channeltools.get_channel_parameters(parent_item.channel)['adult']: - # unify_enabled = False - # except: - # pass - # logger.debug('unify_enabled: %s' % unify_enabled) +# # Add SuperFavourites to context menu (1.0.53 or higher required) +# sf_file_path = xbmc.translatePath("special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py") +# check_sf = os.path.exists(sf_file_path) +# superfavourites = check_sf and xbmc.getCondVisibility('System.HasAddon("plugin.program.super.favourites")') +# # try: +# # if channeltools.get_channel_parameters(parent_item.channel)['adult']: +# # unify_enabled = False +# # except: +# # pass +# # logger.debug('unify_enabled: %s' % unify_enabled) - # Recorremos el itemlist - for item in itemlist: - # logger.debug(item) - # Si el item no contiene categoria, le ponemos la del item padre - item_url = item.tourl() - if item.category == "": - item.category = parent_item.category +# # We go through the itemlist +# for item in itemlist: +# # logger.debug(item) +# # If the item does not contain a category, we will add the parent item +# item_url = item.tourl() +# if item.category == "": +# item.category = parent_item.category - # Si title no existe, lo iniciamos como str, para evitar errones "NoType" - if not item.title: - item.title = '' +# # If title does not exist, we start it as str, to avoid "NoType" mistakes +# if not item.title: +# item.title = '' - # Si no hay action o es findvideos/play, folder=False porque no se va a devolver ningún listado - if item.action in ['play', '']: - item.folder = False +# # If there is no action or it is findvideos / play, folder = False because no listing will be returned +# if item.action in ['play', '']: +# item.folder = False - # Si el item no contiene fanart, le ponemos el del item padre - if item.fanart == "": - item.fanart = parent_item.fanart +# # If the item does not contain fanart, we put the one of the parent item +# if item.fanart == "": +# item.fanart = parent_item.fanart - if genre: - 
valid_genre = True - thumb = get_thumb(item.title, auto=True) - if thumb != '': - item.thumbnail = thumb - valid_genre = True - elif anime: - valid_genre = True - elif (('siguiente' in item.title.lower() and '>' in item.title) or ('pagina:' in item.title.lower())): - item.thumbnail = get_thumb("next.png") - elif 'add' in item.action: - if 'pelicula' in item.action: - item.thumbnail = get_thumb("add_to_videolibrary.png") - elif 'serie' in item.action: - item.thumbnail = get_thumb("add_to_videolibrary.png") +# if genre: +# valid_genre = True +# thumb = get_thumb(item.title, auto=True) +# if thumb != '': +# item.thumbnail = thumb +# valid_genre = True +# elif anime: +# valid_genre = True +# elif (('siguiente' in item.title.lower() and '>' in item.title) or ('pagina:' in item.title.lower())): +# item.thumbnail = get_thumb("next.png") +# elif 'add' in item.action: +# if 'pelicula' in item.action: +# item.thumbnail = get_thumb("add_to_videolibrary.png") +# elif 'serie' in item.action: +# item.thumbnail = get_thumb("add_to_videolibrary.png") - if (unify_enabled or force_unify) and parent_item.channel not in ['kodfavourites']: - # Formatear titulo con unify - item = unify.title_format(item) - else: - # Formatear titulo metodo old school - if item.text_color: - item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title) - if item.text_bold: - item.title = '[B]%s[/B]' % item.title - if item.text_italic: - item.title = '[I]%s[/I]' % item.title +# if (unify_enabled or force_unify) and parent_item.channel not in ['kodfavourites']: +# # Format title with unify +# item = unify.title_format(item) +# else: +# # Format title method old school +# if item.text_color: +# item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title) +# if item.text_bold: +# item.title = '[B]%s[/B]' % item.title +# if item.text_italic: +# item.title = '[I]%s[/I]' % item.title - # Añade headers a las imagenes si estan en un servidor con cloudflare - if item.action == 'play': - item.thumbnail 
= unify.thumbnail_type(item) - else: - item.thumbnail = httptools.get_url_headers(item.thumbnail) - item.fanart = httptools.get_url_headers(item.fanart) +# # Add headers to images if they are on a server with cloudflare +# if item.action == 'play': +# item.thumbnail = unify.thumbnail_type(item) +# else: +# item.thumbnail = httptools.get_url_headers(item.thumbnail) +# item.fanart = httptools.get_url_headers(item.fanart) - # IconImage para folder y video - if item.folder: - icon_image = "DefaultFolder.png" - else: - icon_image = "DefaultVideo.png" +# # Icon Image for folder and video +# if item.folder: +# icon_image = "DefaultFolder.png" +# else: +# icon_image = "DefaultVideo.png" - # Ponemos el fanart - if item.fanart: - fanart = item.fanart - else: - fanart = config.get_fanart() +# # fanart +# if item.fanart: +# fanart = item.fanart +# else: +# fanart = config.get_fanart() - # Creamos el listitem - listitem = xbmcgui.ListItem(item.title) +# # Create listitem +# listitem = xbmcgui.ListItem(item.title) - # values icon, thumb or poster are skin dependent.. so we set all to avoid problems - # if not exists thumb it's used icon value - if config.get_platform(True)['num_version'] >= 16.0: - listitem.setArt({'icon': icon_image, 'thumb': item.thumbnail, 'poster': item.thumbnail, - 'fanart': fanart}) - else: - listitem.setIconImage(icon_image) - listitem.setThumbnailImage(item.thumbnail) - listitem.setProperty('fanart_image', fanart) +# # values icon, thumb or poster are skin dependent.. 
so we set all to avoid problems +# # if not exists thumb it's used icon value +# if config.get_platform(True)['num_version'] >= 16.0: +# listitem.setArt({'icon': icon_image, 'thumb': item.thumbnail, 'poster': item.thumbnail, +# 'fanart': fanart}) +# else: +# listitem.setIconImage(icon_image) +# listitem.setThumbnailImage(item.thumbnail) +# listitem.setProperty('fanart_image', fanart) - # No need it, use fanart instead - # xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg")) +# # No need it, use fanart instead +# # xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg")) - # Esta opcion es para poder utilizar el xbmcplugin.setResolvedUrl() - # if item.isPlayable == True or (config.get_setting("player_mode") == 1 and item.action == "play"): - if config.get_setting("player_mode") == 1 and item.action == "play": - listitem.setProperty('IsPlayable', 'true') +# # This option is to be able to use the xbmcplugin.setResolvedUrl() +# # if item.isPlayable == True or (config.get_setting("player_mode") == 1 and item.action == "play"): +# if config.get_setting("player_mode") == 1 and item.action == "play": +# listitem.setProperty('IsPlayable', 'true') - # Añadimos los infoLabels - set_infolabels(listitem, item) +# # Add infoLabels +# set_infolabels(listitem, item) - # No arrastrar plot si no es una peli/serie/temporada/episodio - if item.plot and item.contentType not in ['movie', 'tvshow', 'season', 'episode']: - item.__dict__['infoLabels'].pop('plot') +# # Do not drag plot if it is not a movie / series / season / episode +# if item.plot and item.contentType not in ['movie', 'tvshow', 'season', 'episode']: +# item.__dict__['infoLabels'].pop('plot') - # Montamos el menu contextual - if parent_item.channel != 'special': - context_commands = set_context_commands(item, item_url, parent_item, has_extendedinfo=has_extendedinfo, superfavourites=superfavourites) - else: - context_commands = [] - # 
Añadimos el menu contextual - if config.get_platform(True)['num_version'] >= 17.0 and parent_item.list_type == '': - listitem.addContextMenuItems(context_commands) - elif parent_item.list_type == '': - listitem.addContextMenuItems(context_commands, replaceItems=True) +# # Mount context menu +# if parent_item.channel != 'special': +# context_commands = set_context_commands(item, item_url, parent_item, has_extendedinfo=has_extendedinfo, superfavourites=superfavourites) +# else: +# context_commands = [] +# # Add context menu +# if config.get_platform(True)['num_version'] >= 17.0 and parent_item.list_type == '': +# listitem.addContextMenuItems(context_commands) +# elif parent_item.list_type == '': +# listitem.addContextMenuItems(context_commands, replaceItems=True) - from specials import shortcuts - context_commands += shortcuts.context() +# from specials import shortcuts +# context_commands += shortcuts.context() - if not item.totalItems: - item.totalItems = 0 - xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item_url), - listitem=listitem, isFolder=item.folder, - totalItems=item.totalItems) +# if not item.totalItems: +# item.totalItems = 0 +# xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item_url), +# listitem=listitem, isFolder=item.folder, +# totalItems=item.totalItems) - # Fijar los tipos de vistas... - if config.get_setting("forceview"): # ...forzamos segun el viewcontent - xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent) +# # Set types of views ... +# if config.get_setting("forceview"): # ...force according to the viewcontent +# xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent) - elif parent_item.channel not in ["channelselector", "", "kodfavourites"]: # ... o segun el canal - xbmcplugin.setContent(int(sys.argv[1]), "movies") +# elif parent_item.channel not in ["channelselector", "", "kodfavourites"]: # ... 
or according to the channel +# xbmcplugin.setContent(int(sys.argv[1]), "movies") - elif parent_item.channel == "kodfavourites" and parent_item.action == 'mostrar_perfil': - xbmcplugin.setContent(int(sys.argv[1]), "movies") +# elif parent_item.channel == "kodfavourites" and parent_item.action == 'mostrar_perfil': +# xbmcplugin.setContent(int(sys.argv[1]), "movies") - # Fijamos el "breadcrumb" - if parent_item.list_type == '': - breadcrumb = parent_item.category.capitalize() - else: - if 'similar' in parent_item.list_type: - if parent_item.contentTitle != '': - breadcrumb = config.get_localized_string(70693) + parent_item.contentTitle - else: - breadcrumb = config.get_localized_string(70693) + parent_item.contentSerieName - else: - breadcrumb = config.get_localized_string(70693) +# # set "breadcrumb" +# if parent_item.list_type == '': +# breadcrumb = parent_item.category.capitalize() +# else: +# if 'similar' in parent_item.list_type: +# if parent_item.contentTitle != '': +# breadcrumb = config.get_localized_string(70693) + parent_item.contentTitle +# else: +# breadcrumb = config.get_localized_string(70693) + parent_item.contentSerieName +# else: +# breadcrumb = config.get_localized_string(70693) - xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=breadcrumb) +# xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=breadcrumb) - # No ordenar items - xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE) +# # Do not sort items +# xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE) - # Cerramos el directorio - xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) +# # We close the directory +# xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) - # Fijar la vista - # if config.get_setting("forceview"): - # viewmode_id = get_viewmode_id(parent_item) - # xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id) - # if parent_item.mode in 
['silent', 'get_cached', 'set_cache', 'finish']: - # xbmc.executebuiltin("Container.SetViewMode(500)") +# # Fix the view +# # if config.get_setting("forceview"): +# # viewmode_id = get_viewmode_id(parent_item) +# # xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id) +# # if parent_item.mode in ['silent', 'get_cached', 'set_cache', 'finish']: +# # xbmc.executebuiltin("Container.SetViewMode(500)") - logger.info('END render_items') +# logger.info('END render_items') -def get_viewmode_id(parent_item): - # viewmode_json habria q guardarlo en un archivo y crear un metodo para q el user fije sus preferencias en: - # user_files, user_movies, user_tvshows, user_season y user_episodes. - viewmode_json = {'skin.confluence': {'default_files': 50, - 'default_movies': 515, - 'default_tvshows': 508, - 'default_seasons': 503, - 'default_episodes': 504, - 'view_list': 50, - 'view_thumbnails': 500, - 'view_movie_with_plot': 503}, - 'skin.estuary': {'default_files': 50, - 'default_movies': 54, - 'default_tvshows': 502, - 'default_seasons': 500, - 'default_episodes': 53, - 'view_list': 50, - 'view_thumbnails': 500, - 'view_movie_with_plot': 54}} +# def get_viewmode_id(parent_item): +# # viewmode_json would have to save it in a file and create a method for the user to set their preferences in: +# # user_files, user_movies, user_tvshows, user_season and user_episodes. +# viewmode_json = {'skin.confluence': {'default_files': 50, +# 'default_movies': 515, +# 'default_tvshows': 508, +# 'default_seasons': 503, +# 'default_episodes': 504, +# 'view_list': 50, +# 'view_thumbnails': 500, +# 'view_movie_with_plot': 503}, +# 'skin.estuary': {'default_files': 50, +# 'default_movies': 54, +# 'default_tvshows': 502, +# 'default_seasons': 500, +# 'default_episodes': 53, +# 'view_list': 50, +# 'view_thumbnails': 500, +# 'view_movie_with_plot': 54}} - # Si el parent_item tenia fijado un viewmode usamos esa vista... 
- if parent_item.viewmode == 'movie': - # Remplazamos el antiguo viewmode 'movie' por 'thumbnails' - parent_item.viewmode = 'thumbnails' +# # If the parent_item had a viewmode set we use that view ... +# if parent_item.viewmode == 'movie': +# # We replace the old viewmode 'movie' with 'thumbnails' +# parent_item.viewmode = 'thumbnails' - if parent_item.viewmode in ["list", "movie_with_plot", "thumbnails"]: - view_name = "view_" + parent_item.viewmode +# if parent_item.viewmode in ["list", "movie_with_plot", "thumbnails"]: +# view_name = "view_" + parent_item.viewmode - '''elif isinstance(parent_item.viewmode, int): - # only for debug - viewName = parent_item.viewmode''' +# '''elif isinstance(parent_item.viewmode, int): +# # only for debug +# viewName = parent_item.viewmode''' - # ...sino ponemos la vista por defecto en funcion del viewcontent - else: - view_name = "default_" + parent_item.viewcontent +# # ...otherwise we put the default view according to the viewcontent +# else: +# view_name = "default_" + parent_item.viewcontent - skin_name = xbmc.getSkinDir() - if skin_name not in viewmode_json: - skin_name = 'skin.confluence' - view_skin = viewmode_json[skin_name] - return view_skin.get(view_name, 50) +# skin_name = xbmc.getSkinDir() +# if skin_name not in viewmode_json: +# skin_name = 'skin.confluence' +# view_skin = viewmode_json[skin_name] +# return view_skin.get(view_name, 50) def set_infolabels(listitem, item, player=False): """ - Metodo para pasar la informacion al listitem (ver tmdb.set_InfoLabels() ) - item.infoLabels es un dicionario con los pares de clave/valor descritos en: - http://mirrors.xbmc.org/docs/python-docs/14.x-helix/xbmcgui.html#ListItem-setInfo - https://kodi.wiki/view/InfoLabels - @param listitem: objeto xbmcgui.ListItem - @type listitem: xbmcgui.ListItem - @param item: objeto Item que representa a una pelicula, serie o capitulo - @type item: item + Method to pass the information to the listitem (see tmdb.set_InfoLabels()) +    
item.infoLabels is a dictionary with the key / value pairs described in: +    http://mirrors.xbmc.org/docs/python-docs/14.x-helix/xbmcgui.html#ListItem-setInfo +    https://kodi.wiki/view/InfoLabels +    @param listitem: xbmcgui.ListItem object +    @type listitem: xbmcgui.ListItem +    @param item: Item object that represents a movie, series or chapter +    @type item: item """ infoLabels_dict = {'aired': 'aired', 'album': 'album', 'artist': 'artist', 'cast': 'cast', @@ -602,38 +612,37 @@ def set_infolabels(listitem, item, player=False): def set_context_commands(item, item_url, parent_item, **kwargs): """ - Función para generar los menus contextuales. - 1. Partiendo de los datos de item.context - a. Metodo antiguo item.context tipo str separando las opciones por "|" (ejemplo: item.context = "1|2|3") - (solo predefinidos) - b. Metodo list: item.context es un list con las diferentes opciones del menu: - - Predefinidos: Se cargara una opcion predefinida con un nombre. - item.context = ["1","2","3"] + Function to generate context menus. +        1. Based on the data in item.context +            a. Old method item.context type str separating options by "|" (example: item.context = "1 | 2 | 3") +                (only predefined) +            b. List method: item.context is a list with the different menu options: +                - Predefined: A predefined option will be loaded with a name. +                    item.context = ["1", "2", "3"] - - dict(): Se cargara el item actual modificando los campos que se incluyan en el dict() en caso de - modificar los campos channel y action estos serán guardados en from_channel y from_action. - item.context = [{"title":"Nombre del menu", "action": "action del menu", - "channel":"channel del menu"}, {...}] +                - dict (): The current item will be loaded modifying the fields included in the dict () in case of +                    modify the channel and action fields these will be saved in from_channel and from_action. 
+                    item.context = [{"title": "Name of the menu", "action": "action of the menu", "channel": "menu channel"}, {...}] - 2. Añadiendo opciones segun criterios - Se pueden añadir opciones al menu contextual a items que cumplan ciertas condiciones. +        2. Adding options according to criteria +            Options can be added to the context menu to items that meet certain conditions. - 3. Añadiendo opciones a todos los items - Se pueden añadir opciones al menu contextual para todos los items +        3. Adding options to all items +            Options can be added to the context menu for all items - 4. Se pueden deshabilitar las opciones del menu contextual añadiendo un comando 'no_context' al item.context. - Las opciones que Kodi, el skin u otro añadido añada al menu contextual no se pueden deshabilitar. +        4. You can disable the context menu options by adding a command 'no_context' to the item.context. +            The options that Kodi, the skin or another added add to the contextual menu cannot be disabled. 
- @param item: elemento que contiene los menu contextuales - @type item: item - @param parent_item: - @type parent_item: item +    @param item: element that contains the contextual menus +    @type item: item +    @param parent_item: +    @type parent_item: item """ context_commands = [] # num_version_xbmc = config.get_platform(True)['num_version'] - # Creamos un list con las diferentes opciones incluidas en item.context + # Create a list with the different options included in item.context if isinstance(item.context, str): context = item.context.split("|") elif isinstance(item.context, list): @@ -641,43 +650,41 @@ def set_context_commands(item, item_url, parent_item, **kwargs): else: context = [] - # Opciones segun item.context + # Options according to item.context for command in context: - # Predefinidos + # Predefined if isinstance(command, str): if command == "no_context": return [] - # Formato dict + # Dict format if isinstance(command, dict): - # Los parametros del dict, se sobreescriben al nuevo context_item en caso de sobreescribir "action" y - # "channel", los datos originales se guardan en "from_action" y "from_channel" + # The dict parameters are overwritten to the new context_item in case of overwriting "action" and + # "channel", the original data is saved in "from_action" and "from_channel" if "action" in command: command["from_action"] = item.action if "channel" in command: command["from_channel"] = item.channel - # Si no se está dentro de Alfavoritos y hay los contextos de alfavoritos, descartarlos. - # (pasa al ir a un enlace de alfavoritos, si este se clona en el canal) + # If you are not inside Alphavorites and there are the contexts for Alphavorites, discard them. 
+ # (it happens when going to a link of alfavoritos, if this is cloned in the channel) if parent_item.channel != 'kodfavorites' and 'i_perfil' in command and 'i_enlace' in command: continue if "goto" in command: - context_commands.append((command["title"], "XBMC.Container.Refresh (%s?%s)" % - (sys.argv[0], item.clone(**command).tourl()))) + context_commands.append((command["title"], "XBMC.Container.Refresh (%s?%s)" % (sys.argv[0], item.clone(**command).tourl()))) else: - context_commands.append( - (command["title"], "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(**command).tourl()))) - # No añadir más opciones predefinidas si se está dentro de Alfavoritos + context_commands.append((command["title"], "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(**command).tourl()))) + # Do not add more predefined options if you are inside kodfavoritos if parent_item.channel == 'kodfavorites': return context_commands - # Opciones segun criterios, solo si el item no es un tag (etiqueta), ni es "Añadir a la videoteca", etc... + # Options according to criteria, only if the item is not a tag, nor is it "Add to the video library", etc... 
if item.action and item.action not in ["add_pelicula_to_library", "add_serie_to_library", "buscartrailer", "actualizar_titulos"]: - # Mostrar informacion: si el item tiene plot suponemos q es una serie, temporada, capitulo o pelicula + # Show information: if the item has a plot, we assume that it is a series, season, chapter or movie # if item.infoLabels['plot'] and (num_version_xbmc < 17.0 or item.contentType == 'season'): # context_commands.append((config.get_localized_string(60348), "XBMC.Action(Info)")) - # ExtendedInfo: Si está instalado el addon y se cumplen una serie de condiciones + # ExtendedInfo: If the addon is installed and a series of conditions are met if kwargs.get('has_extendedinfo') \ and config.get_setting("extended_info") == True: if item.contentType == "episode" and item.contentEpisodeNumber and item.contentSeason \ @@ -685,31 +692,22 @@ def set_context_commands(item, item_url, parent_item, **kwargs): param = "tvshow_id =%s, tvshow=%s, season=%s, episode=%s" \ % (item.infoLabels['tmdb_id'], item.contentSerieName, item.contentSeason, item.contentEpisodeNumber) - context_commands.append(("ExtendedInfo", - "XBMC.RunScript(script.extendedinfo,info=extendedepisodeinfo,%s)" % param)) + context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedepisodeinfo,%s)" % param)) elif item.contentType == "season" and item.contentSeason \ and (item.infoLabels['tmdb_id'] or item.contentSerieName): param = "tvshow_id =%s,tvshow=%s, season=%s" \ % (item.infoLabels['tmdb_id'], item.contentSerieName, item.contentSeason) - context_commands.append(("ExtendedInfo", - "XBMC.RunScript(script.extendedinfo,info=seasoninfo,%s)" % param)) + context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=seasoninfo,%s)" % param)) - elif item.contentType == "tvshow" and (item.infoLabels['tmdb_id'] or item.infoLabels['tvdb_id'] or - item.infoLabels['imdb_id'] or item.contentSerieName): - param = "id 
=%s,tvdb_id=%s,imdb_id=%s,name=%s" \ - % (item.infoLabels['tmdb_id'], item.infoLabels['tvdb_id'], item.infoLabels['imdb_id'], - item.contentSerieName) - context_commands.append(("ExtendedInfo", - "XBMC.RunScript(script.extendedinfo,info=extendedtvinfo,%s)" % param)) + elif item.contentType == "tvshow" and (item.infoLabels['tmdb_id'] or item.infoLabels['tvdb_id'] or item.infoLabels['imdb_id'] or item.contentSerieName): + param = "id =%s,tvdb_id=%s,imdb_id=%s,name=%s" % (item.infoLabels['tmdb_id'], item.infoLabels['tvdb_id'], item.infoLabels['imdb_id'], item.contentSerieName) + context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedtvinfo,%s)" % param)) - elif item.contentType == "movie" and (item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or - item.contentTitle): - param = "id =%s,imdb_id=%s,name=%s" \ - % (item.infoLabels['tmdb_id'], item.infoLabels['imdb_id'], item.contentTitle) + elif item.contentType == "movie" and (item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.contentTitle): + param = "id =%s,imdb_id=%s,name=%s" % (item.infoLabels['tmdb_id'], item.infoLabels['imdb_id'], item.contentTitle) - context_commands.append(("ExtendedInfo", - "XBMC.RunScript(script.extendedinfo,info=extendedinfo,%s)" % param)) + context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedinfo,%s)" % param)) # InfoPlus if config.get_setting("infoplus"): #if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id'] or \ @@ -718,33 +716,25 @@ def set_context_commands(item, item_url, parent_item, **kwargs): context_commands.append(("InfoPlus", "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=infoplus&action=start&from_channel=' + item.channel))) - # Ir al Menu Principal (channel.mainlist) - if parent_item.channel not in ["news", "channelselector", "downloads"] and item.action != "mainlist" \ - and parent_item.action != "mainlist": - context_commands.insert(0, 
(config.get_localized_string(60349), "XBMC.Container.Refresh (%s?%s)" % - (sys.argv[0], Item(channel=item.channel, action="mainlist").tourl()))) - context_commands.insert(1, (config.get_localized_string(70739), - "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="open_browser", - url=item.url).tourl()))) + # Go to the Main Menu (channel.mainlist) + if parent_item.channel not in ["news", "channelselector", "downloads"] and item.action != "mainlist" and parent_item.action != "mainlist": + context_commands.insert(0, (config.get_localized_string(60349), "XBMC.Container.Refresh (%s?%s)" % (sys.argv[0], Item(channel=item.channel, action="mainlist").tourl()))) + context_commands.insert(1, (config.get_localized_string(70739), "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="open_browser", url=item.url).tourl()))) - # Añadir a Favoritos + # Add to Favorites # if num_version_xbmc < 17.0 and \ # ((item.channel not in ["favorites", "videolibrary", "help", ""] # or item.action in ["update_videolibrary"]) and parent_item.channel != "favorites"): # context_commands.append((config.get_localized_string(30155), "XBMC.RunPlugin(%s?%s&%s)" % # (sys.argv[0], item_url, 'channel=favorites&action=addFavourite&from_channel=' + item.channel + '&from_action=' + item.action))) - # Añadir a Alfavoritos (Mis enlaces) + # Add to kodfavoritos (My links) if item.channel not in ["favorites", "videolibrary", "help", ""] and parent_item.channel != "favorites": - context_commands.append( - (config.get_localized_string(70557), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, urllib.urlencode({'channel': "kodfavourites", 'action': "addFavourite", - 'from_channel': item.channel, - 'from_action': item.action})))) - # Buscar en otros canales + context_commands.append( (config.get_localized_string(70557), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, urllib.urlencode({'channel': "kodfavorites", 'action': "addFavourite", 'from_channel': item.channel, 'from_action': 
item.action})))) + # Search in other channels if item.contentType in ['movie', 'tvshow'] and item.channel != 'search' and item.action not in ['play'] and parent_item.action != 'mainlist': - # Buscar en otros canales + # Search in other channels if item.contentSerieName != '': item.wanted = item.contentSerieName else: @@ -755,88 +745,63 @@ def set_context_commands(item, item_url, parent_item, **kwargs): else: mediatype = item.contentType - context_commands.append((config.get_localized_string(60350), - "XBMC.Container.Update (%s?%s&%s)" % (sys.argv[0], - item_url, urllib.urlencode({'channel': 'search', - 'action': "from_context", - 'from_channel': item.channel, - 'contextual': True, - 'text': item.wanted})))) + context_commands.append((config.get_localized_string(60350), "XBMC.Container.Update (%s?%s&%s)" % (sys.argv[0], item_url, urllib.urlencode({'channel': 'search', 'action': "from_context", 'from_channel': item.channel, 'contextual': True, 'text': item.wanted})))) - context_commands.append( - (config.get_localized_string(70561), "XBMC.Container.Update (%s?%s&%s)" % ( - sys.argv[0], item_url, 'channel=search&action=from_context&search_type=list&page=1&list_type=%s/%s/similar' % (mediatype, item.infoLabels['tmdb_id'])))) - # Definir como Pagina de inicio + context_commands.append( (config.get_localized_string(70561), "XBMC.Container.Update (%s?%s&%s)" % (sys.argv[0], item_url, 'channel=search&action=from_context&search_type=list&page=1&list_type=%s/%s/similar' % (mediatype, item.infoLabels['tmdb_id'])))) + # Set as Home Page if config.get_setting('start_page'): if item.action not in ['episodios', 'seasons', 'findvideos', 'play']: - context_commands.insert(0, (config.get_localized_string(60351), - "XBMC.RunPlugin(%s?%s)" % ( - sys.argv[0], Item(channel='side_menu', - action="set_custom_start", - parent=item.tourl()).tourl()))) + context_commands.insert(0, (config.get_localized_string(60351), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], Item(channel='side_menu', 
action="set_custom_start", parent=item.tourl()).tourl()))) if item.channel != "videolibrary": - # Añadir Serie a la videoteca + # Add Series to the video library if item.action in ["episodios", "get_episodios", "get_seasons"] and item.contentSerieName: - context_commands.append((config.get_localized_string(60352), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'action=add_serie_to_library&from_action=' + item.action))) - # Añadir Pelicula a videoteca + context_commands.append((config.get_localized_string(60352), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_serie_to_library&from_action=' + item.action))) + # Add Movie to Video Library elif item.action in ["detail", "findvideos"] and item.contentType == 'movie' and item.contentTitle: - context_commands.append((config.get_localized_string(60353), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'action=add_pelicula_to_library&from_action=' + item.action))) + context_commands.append((config.get_localized_string(60353), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_pelicula_to_library&from_action=' + item.action))) - if not item.local and item.channel not in ["downloads"] and item.server != 'torrent' and parent_item.action != 'mainlist' and config.get_setting('downloadenabled'): - # Descargar pelicula + if not item.local and item.channel not in ["downloads", "filmontv"] and item.server != 'torrent' and parent_item.action != 'mainlist' and config.get_setting('downloadenabled'): + # Download movie if item.contentType == "movie": - context_commands.append((config.get_localized_string(60354), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))) + context_commands.append((config.get_localized_string(60354), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))) elif 
item.contentSerieName: - # Descargar serie - if item.contentType == "tvshow": + # Descargar series + if item.contentType == "tvshow" and item.action not in ['findvideos']: if item.channel == 'videolibrary': - context_commands.append((config.get_localized_string(60003), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, - 'channel=downloads&action=save_download&unseen=true&from_channel=' + item.channel + '&from_action=' + item.action))) - context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))) - context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'channel=downloads&action=save_download&download=season&from_channel=' + item.channel + '&from_action=' + item.action))) - # Descargar episodio - elif item.contentType == "episode": - context_commands.append((config.get_localized_string(60356), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))) - # Descargar temporada + context_commands.append((config.get_localized_string(60003), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&unseen=true&from_channel=' + item.channel + '&from_action=' + item.action))) + context_commands.append((config.get_localized_string(60355), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))) + context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&download=season&from_channel=' + item.channel + '&from_action=' + item.action))) + # Download episode + elif item.contentType == "episode" or item.action in ['findvideos']: + 
context_commands.append((config.get_localized_string(60356), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))) + # Download season elif item.contentType == "season": - context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s&%s)" % - (sys.argv[0], item_url, 'channel=downloads&action=save_download&download=season&from_channel=' + item.channel + '&from_action=' + item.action))) + context_commands.append((config.get_localized_string(60357), "XBMC.RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&download=season&from_channel=' + item.channel + '&from_action=' + item.action))) - # # Abrir configuración + # # Open settings # if parent_item.channel not in ["setting", "news", "search"] and item.action == "play": # context_commands.append((config.get_localized_string(60358), "XBMC.Container.Update(%s?%s)" % # (sys.argv[0], Item(channel="setting", action="mainlist").tourl()))) - # Buscar Trailer - if item.action in ["findvideos", 'episodios', 'check'] or "buscar_trailer" in context: - context_commands.append( - (config.get_localized_string(60359), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], urllib.urlencode({ - 'channel': "trailertools", 'action': "buscartrailer", 'search_title': item.fulltitle if item.fulltitle else item.contentTitle, 'contextual': True})))) + # Open settings... 
+ if item.action in ["findvideos", 'episodios', 'check', 'new_search'] or "buscar_trailer" in context: + context_commands.append((config.get_localized_string(60359), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], urllib.urlencode({ 'channel': "trailertools", 'action': "buscartrailer", 'search_title': item.contentTitle if item.contentTitle else item.fulltitle, 'contextual': True})))) if kwargs.get('superfavourites'): - context_commands.append((config.get_localized_string(60361), - "XBMC.RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)")) + context_commands.append((config.get_localized_string(60361), "XBMC.RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)")) # context_commands = sorted(context_commands, key=lambda comand: comand[0]) - # Menu Rapido + # Quick Menu # context_commands.insert(0, (config.get_localized_string(60360), # "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu', # action="open_menu", # parent=parent_item.tourl()).tourl( # )))) if config.dev_mode(): - context_commands.insert(0, ("item info", - "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="itemInfo", parent=item.tojson()).tourl()))) + context_commands.insert(0, ("item info", "XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="itemInfo", parent=item.tojson()).tourl()))) return context_commands @@ -865,32 +830,32 @@ def play_video(item, strm=False, force_direct=False, autoplay=False): default_action = config.get_setting("default_action") logger.info("default_action=%s" % default_action) - # Abre el diálogo de selección para ver las opciones disponibles + # Open the selection dialog to see the available options opciones, video_urls, seleccion, salir = get_dialogo_opciones(item, default_action, strm, autoplay) if salir: return - # se obtienen la opción predeterminada de la configuración del addon + # get default option of addon configuration seleccion = get_seleccion(default_action, opciones, seleccion, 
video_urls) - if seleccion < 0: # Cuadro cancelado + if seleccion < 0: # Canceled box return - logger.info("seleccion=%d" % seleccion) - logger.info("seleccion=%s" % opciones[seleccion]) + logger.info("selection=%d" % seleccion) + logger.info("selection=%s" % opciones[seleccion]) - # se ejecuta la opcion disponible, jdwonloader, descarga, favoritos, añadir a la videoteca... SI NO ES PLAY + # run the available option, jdwonloader, download, favorites, add to the video library ... IF IT IS NOT PLAY salir = set_opcion(item, seleccion, opciones, video_urls) if salir: return - # obtenemos el video seleccionado + # we get the selected video mediaurl, view, mpd = get_video_seleccionado(item, seleccion, video_urls) if mediaurl == "": return # # no certificate verification # mediaurl = mediaurl.replace('https://', 'http://') - # se obtiene la información del video. + # video information is obtained. if not item.contentThumbnail: thumb = item.thumbnail else: @@ -904,15 +869,15 @@ def play_video(item, strm=False, force_direct=False, autoplay=False): set_infolabels(xlistitem, item, True) - # si se trata de un vídeo en formato mpd, se configura el listitem para reproducirlo - # con el addon inpustreamaddon implementado en Kodi 17 + # if it is a video in mpd format, the listitem is configured to play it + # with the inpustreamaddon addon implemented in Kodi 17 if mpd: xlistitem.setProperty('inputstreamaddon', 'inputstream.adaptive') xlistitem.setProperty('inputstream.adaptive.manifest_type', 'mpd') - # se lanza el reproductor - if force_direct: # cuando viene de una ventana y no directamente de la base del addon - # Añadimos el listitem a una lista de reproducción (playlist) + # player launches + if force_direct: # when it comes from a window and not directly from the addon base + # We add the listitem to a playlist playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() playlist.add(mediaurl, xlistitem) @@ -929,18 +894,18 @@ def stop_video(): def 
get_seleccion(default_action, opciones, seleccion, video_urls): fixpri = False - # para conocer en que prioridad se trabaja + # to know what priority you work on priority = int(config.get_setting("resolve_priority")) - # se usara para comprobar si hay links premium o de debriders + # will be used to check for premium or debrider links check = [] - # Comprueba si resolve stop esta desactivado + # Check if resolve stop is disabled if config.get_setting("resolve_stop") == False: fixpri = True - # preguntar + # ask if default_action == 0: - # "Elige una opción" + # "Choose an option" seleccion = dialog_select(config.get_localized_string(30163), opciones) - # Ver en calidad baja + # View in low quality elif default_action == 1: resolutions = [] for url in video_urls: @@ -958,7 +923,7 @@ def get_seleccion(default_action, opciones, seleccion, video_urls): seleccion = resolutions.index(min(resolutions)) else: seleccion = 0 - # Ver en alta calidad + # See in high quality elif default_action == 2: resolutions = [] for url in video_urls: @@ -1009,11 +974,10 @@ def calcResolution(option): def show_channel_settings(**kwargs): """ - Muestra un cuadro de configuracion personalizado para cada canal y guarda los datos al cerrarlo. + It shows a customized configuration box for each channel and saves the data when closing it. + The parameters passed to it can be seen in the method that is called - Los parámetros que se le pasan se puede ver en la el método al que se llama - - @return: devuelve la ventana con los elementos + @return: returns the window with the elements @rtype: SettingsWindow """ from platformcode.xbmc_config_menu import SettingsWindow @@ -1022,11 +986,10 @@ def show_channel_settings(**kwargs): def show_video_info(*args, **kwargs): """ - Muestra una ventana con la info del vídeo. + It shows a window with the info of the video. 
+    The parameters passed to it can be seen in the method that is called - Los parámetros que se le pasan se puede ver en la el método al que se llama - - @return: devuelve la ventana con los elementos +    @return: returns the window with the elements @rtype: InfoWindow """ @@ -1040,13 +1003,12 @@ def show_recaptcha(key, referer): def alert_no_disponible_server(server): - # 'El vídeo ya no está en %s' , 'Prueba en otro servidor o en otro canal' - dialog_ok(config.get_localized_string(30055), (config.get_localized_string(30057) % server), - config.get_localized_string(30058)) + # 'The video is no longer in %s', 'Try another server or another channel' + dialog_ok(config.get_localized_string(30055), (config.get_localized_string(30057) % server), config.get_localized_string(30058)) def alert_unsopported_server(): - # 'Servidor no soportado o desconocido' , 'Prueba en otro servidor o en otro canal' + # 'Unsupported or unknown server ',' Test on another server or on another channel' dialog_ok(config.get_localized_string(30065), config.get_localized_string(30058)) @@ -1070,10 +1032,10 @@ def handle_wait(time_to_wait, title, text): break if cancelled: - logger.info('Espera cancelada') + logger.info('Wait canceled') return False else: - logger.info('Espera finalizada') + logger.info('Wait finished') return True @@ -1093,11 +1055,11 @@ def get_dialogo_opciones(item, default_action, strm, autoplay): if item.server == "": item.server = "directo" - # Si no es el modo normal, no muestra el diálogo porque cuelga XBMC + # If it is not the normal mode, it does not show the dialog because XBMC hangs muestra_dialogo = (config.get_setting("player_mode") == 0 and not strm) - # Extrae las URL de los vídeos, y si no puedes verlo te dice el motivo - # Permitir varias calidades para server "directo" + # Extract the URLs of the videos, and if you can't see it, it tells you the reason + # Allow multiple qualities for "direct" server if item.video_urls: video_urls, puedes, motivo = 
item.video_urls, True, "" @@ -1106,7 +1068,7 @@ def get_dialogo_opciones(item, default_action, strm, autoplay): item.server, item.url, item.password, muestra_dialogo) seleccion = 0 - # Si puedes ver el vídeo, presenta las opciones + # If you can see the video, present the options if puedes: for video_url in video_urls: opciones.append(config.get_localized_string(60221) + " " + video_url[0]) @@ -1114,48 +1076,45 @@ def get_dialogo_opciones(item, default_action, strm, autoplay): if item.server == "local": opciones.append(config.get_localized_string(30164)) else: - # "Descargar" + # "Download" downloadenabled = config.get_setting('downloadenabled') if downloadenabled != False and item.channel != 'videolibrary': opcion = config.get_localized_string(30153) opciones.append(opcion) if item.isFavourite: - # "Quitar de favoritos" + # "Remove from favorites" opciones.append(config.get_localized_string(30154)) else: - # "Añadir a favoritos" + # "Add to Favorites" opciones.append(config.get_localized_string(30155)) - if not strm and item.contentType == 'movie': - # "Añadir a videoteca" + if not strm and item.contentType == 'movie' and item.channel != 'videolibrary': + # "Add to video library" opciones.append(config.get_localized_string(30161)) if default_action == 3: seleccion = len(opciones) - 1 - # Busqueda de trailers en youtube + # Search for trailers on youtube if item.channel not in ["Trailer", "ecarteleratrailers"]: - # "Buscar Trailer" + # "Search Trailer" opciones.append(config.get_localized_string(30162)) - # Si no puedes ver el vídeo te informa + # If you can't see the video it informs you else: if not autoplay: if item.server != "": if "<br/>" in motivo: - ret = dialog_yesno(config.get_localized_string(60362), motivo.split("<br/>")[0], motivo.split("<br/>")[1], - item.url, nolabel='ok', yeslabel=config.get_localized_string(70739)) + ret = dialog_yesno(config.get_localized_string(60362), motivo.split("<br/>")[0], motivo.split("<br/>")[1], item.url, nolabel='ok', 
yeslabel=config.get_localized_string(70739)) else: ret = dialog_yesno(config.get_localized_string(60362), motivo, item.url, nolabel='ok', yeslabel=config.get_localized_string(70739)) else: - ret = dialog_yesno(config.get_localized_string(60362), config.get_localized_string(60363), - config.get_localized_string(60364), item.url, nolabel='ok', yeslabel=config.get_localized_string(70739)) + ret = dialog_yesno(config.get_localized_string(60362), config.get_localized_string(60363), config.get_localized_string(60364), item.url, nolabel='ok', yeslabel=config.get_localized_string(70739)) if ret: - xbmc.executebuiltin("XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="open_browser", - url=item.url).tourl())) + xbmc.executebuiltin("XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(action="open_browser", url=item.url).tourl())) if item.channel == "favorites": - # "Quitar de favoritos" + # "Remove from favorites" opciones.append(config.get_localized_string(30154)) if len(opciones) == 0: @@ -1168,10 +1127,10 @@ def set_opcion(item, seleccion, opciones, video_urls): logger.info() # logger.debug(item.tostring('\n')) salir = False - # No ha elegido nada, lo más probable porque haya dado al ESC + # You have not chosen anything, most likely because you have given the ESC if seleccion == -1: - # Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm + # To avoid the error "One or more elements failed" when deselecting from strm file listitem = xbmcgui.ListItem(item.title) if config.get_platform(True)['num_version'] >= 16.0: @@ -1182,7 +1141,7 @@ def set_opcion(item, seleccion, opciones, video_urls): xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem) - # "Descargar" + # "Download" elif opciones[seleccion] == config.get_localized_string(30153): from specials import downloads @@ -1192,25 +1151,23 @@ def set_opcion(item, seleccion, opciones, video_urls): downloads.save_download(item) salir = True - # "Quitar de favoritos" + # 
"Remove from favorites" elif opciones[seleccion] == config.get_localized_string(30154): from specials import favorites favorites.delFavourite(item) salir = True - # "Añadir a favoritos": + # "Add to Favorites": elif opciones[seleccion] == config.get_localized_string(30155): from specials import favorites item.from_channel = "favorites" favorites.addFavourite(item) salir = True - # "Buscar Trailer": + # "Search Trailer": elif opciones[seleccion] == config.get_localized_string(30162): config.set_setting("subtitulo", False) - xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" % - (sys.argv[0], item.clone(channel="trailertools", action="buscartrailer", - contextual=True).tourl())) + xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="trailertools", action="buscartrailer", contextual=True).tourl())) salir = True return salir @@ -1223,7 +1180,7 @@ def get_video_seleccionado(item, seleccion, video_urls): wait_time = 0 mpd = False - # Ha elegido uno de los vídeos + # You have chosen one of the videos if seleccion < len(video_urls): mediaurl = video_urls[seleccion][1] if len(video_urls[seleccion]) > 4: @@ -1239,7 +1196,7 @@ def get_video_seleccionado(item, seleccion, video_urls): wait_time = video_urls[seleccion][2] view = True - # Si no hay mediaurl es porque el vídeo no está :) + # If there is no mediaurl it is because the video is not there :) logger.info("mediaurl=" + mediaurl) if mediaurl == "": if item.server == "unknown": @@ -1247,7 +1204,7 @@ def get_video_seleccionado(item, seleccion, video_urls): else: alert_no_disponible_server(item.server) - # Si hay un tiempo de espera (como en megaupload), lo impone ahora + # If there is a timeout (like in megaupload), impose it now if wait_time > 0: continuar = handle_wait(wait_time, item.server, config.get_localized_string(60365)) if not continuar: @@ -1259,12 +1216,12 @@ def get_video_seleccionado(item, seleccion, video_urls): def set_player(item, xlistitem, mediaurl, view, strm): logger.info() 
logger.debug("item:\n" + item.tostring('\n')) - # Movido del conector "torrent" aqui + # Moved del conector "torrent" here if item.server == "torrent": play_torrent(item, xlistitem, mediaurl) return - # Si es un fichero strm no hace falta el play + # If it is a strm file, play is not necessary elif strm: xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem) if item.subtitle != "": @@ -1281,7 +1238,7 @@ def set_player(item, xlistitem, mediaurl, view, strm): elif config.get_setting("player_mode") == 0 or item.play_from == 'window' or \ (config.get_setting("player_mode") == 3 and mediaurl.startswith("rtmp")): - # Añadimos el listitem a una lista de reproducción (playlist) + # We add the listitem to a playlist playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() playlist.add(mediaurl, xlistitem) @@ -1295,7 +1252,7 @@ def set_player(item, xlistitem, mediaurl, view, strm): # elif config.get_setting("player_mode") == 1 or item.isPlayable: elif config.get_setting("player_mode") == 1: logger.info("Tras setResolvedUrl") - # si es un archivo de la videoteca enviar a marcar como visto + # if it is a video library file send to mark as seen if strm or item.strm_path: from platformcode import xbmc_videolibrary @@ -1308,226 +1265,73 @@ def set_player(item, xlistitem, mediaurl, view, strm): elif config.get_setting("player_mode") == 2: xbmc.executebuiltin("PlayMedia(" + mediaurl + ")") - # TODO MIRAR DE QUITAR VIEW + # ALL LOOKING TO REMOVE VIEW if item.subtitle != "" and view: logger.info("Subtítulos externos: " + item.subtitle) xbmc.sleep(2000) xbmc_player.setSubtitles(item.subtitle) - # si es un archivo de la videoteca enviar a marcar como visto + # if it is a video library file send to mark as seen if strm or item.strm_path: from platformcode import xbmc_videolibrary xbmc_videolibrary.mark_auto_as_watched(item) def torrent_client_installed(show_tuple=False): - # Plugins externos se encuentra en servers/torrent.json nodo clients + # External plugins found in 
servers / torrent.json node clients from core import filetools from core import jsontools - torrent_clients = jsontools.get_node_from_file("torrent.json", "clients", filetools.join(config.get_runtime_path(),"servers")) + torrent_clients = jsontools.get_node_from_file("torrent.json", "clients", filetools.join(config.get_runtime_path(), "servers")) torrent_options = [] for client in torrent_clients: if xbmc.getCondVisibility('System.HasAddon("%s")' % client["id"]): if show_tuple: - torrent_options.append([config.get_localized_string(60366) % client["name"], client["url"]]) + torrent_options.append([client["name"], client["url"]]) else: - torrent_options.append(config.get_localized_string(60366) % client["name"]) + torrent_options.append(client["name"]) return torrent_options def play_torrent(item, xlistitem, mediaurl): logger.info() import time - import traceback - - from core import filetools - from core import httptools - from lib import generictools from servers import torrent - - # Si Libtorrent ha dado error de inicialización, no se pueden usar los clientes internos - UNRAR = config.get_setting("unrar_path", server="torrent", default="") - LIBTORRENT = config.get_setting("libtorrent_path", server="torrent", default='') - size_rar = 2 - rar_files = [] - if item.password: - size_rar = 3 - - - # Opciones disponibles para Reproducir torrents torrent_options = torrent_client_installed(show_tuple=True) - torrent_client = config.get_setting("torrent_client", server="torrent") - - # Si es Libtorrent y no está soportado, se ofrecen alternativas, si las hay... 
- - if len(torrent_options) > 1: + if len(torrent_options) == 0: + from specials import elementum_download + elementum_download.download() + return play_torrent(item, xlistitem, mediaurl) + elif len(torrent_options) > 1: selection = dialog_select(config.get_localized_string(70193), [opcion[0] for opcion in torrent_options]) else: selection = 0 - # Si es Torrenter o Elementum con opción de Memoria, se ofrece la posibilidad ee usar Libtorrent temporalemente - if selection >= 0 and LIBTORRENT and UNRAR and 'RAR-' in item.torrent_info and ( - "torrenter" in torrent_options[selection][0] \ - or ("elementum" in torrent_options[selection][0] and xbmcaddon.Addon(id="plugin.video.%s" % torrent_options[selection][0].replace('Plugin externo: ','')).getSetting('download_storage') == '1')): - if dialog_yesno(torrent_options[selection][0], config.get_localized_string(70777), config.get_localized_string(70778), config.get_localized_string(70779) % size_rar): - selection = 1 - else: - return - # Si es Elementum pero con opción de Memoria, se muestras los Ajustes de Elementum y se pide al usuario que cambie a "Usar Archivos" - elif selection >= 0 and not LIBTORRENT and UNRAR and 'RAR-' in item.torrent_info and "elementum" in \ - torrent_options[selection][0] \ - and xbmcaddon.Addon(id="plugin.video.%s" % torrent_options[selection][0].capitalize()) \ - .getSetting('download_storage') == '1': - if dialog_yesno(torrent_options[selection][0], config.get_localized_string(70780) % size_rar, config.get_localized_string(70781)): - __settings__ = xbmcaddon.Addon( - id="plugin.video.%s" % torrent_options[selection][0].capitalize()) - __settings__.openSettings() # Se visulizan los Ajustes de Elementum - elementum_dl = xbmcaddon.Addon( - id="plugin.video.%s" % torrent_options[selection][0].capitalize()) \ - .getSetting('download_storage') - if elementum_dl != '1': - config.set_setting("elementum_dl", "1", server="torrent") # Salvamos el cambio para restaurarlo luego - return # Se sale, porque 
habrá refresco y cancelaría Kodi si no - - # Descarga de torrents a local if selection >= 0: - #### Compatibilidad con Kodi 18: evita cuelgues/cancelaciones cuando el .torrent se lanza desde pantalla convencional - # if xbmc.getCondVisibility('Window.IsMedia'): - xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) # Preparamos el entorno para evitar error Kod1 18 - time.sleep(0.5) # Dejamos tiempo para que se ejecute - - # Nuevo método de descarga previa del .torrent. Si da error, miramos si hay alternatica local. - # Si ya es local, lo usamos - url = '' - url_stat = False - torrents_path = '' - referer = None - post = None - rar = False - size = '' - password = '' - if item.password: - password = item.password - - videolibrary_path = config.get_videolibrary_path() # Calculamos el path absoluto a partir de la Videoteca - if scrapertools.find_single_match(videolibrary_path, '(^\w+:\/\/)'): # Si es una conexión REMOTA, usamos userdata local - videolibrary_path = config.get_data_path() # Calculamos el path absoluto a partir de Userdata - if not filetools.exists(videolibrary_path): # Si no existe el path, pasamos al modo clásico - videolibrary_path = False - else: - torrents_path = filetools.join(videolibrary_path, 'temp_torrents', 'client_torrent.torrent') # path descarga temporal - if not videolibrary_path or not filetools.exists(filetools.join(videolibrary_path, 'temp_torrents')): # Si no existe la carpeta temporal, la creamos - filetools.mkdir(filetools.join(videolibrary_path, 'temp_torrents')) - - # Si hay headers, se pasar a la petición de descarga del .torrent - headers = {} - if item.headers: - headers = item.headers - - # identificamos si es una url o un path de archivo - if not item.url.startswith("\\") and not item.url.startswith("/") and not url_stat: - timeout = 10 - if item.torrent_alt: - timeout = 5 - # Si es una llamada con POST, lo preparamos - if item.referer: referer = item.referer - if item.post: post = item.post - # Descargamos el 
.torrent - size, url, torrent_f, rar_files = generictools.get_torrent_size(item.url, referer, post, torrents_path=torrents_path, timeout=timeout, lookup=False, headers=headers, short_pad=True) - if url: - url_stat = True - item.url = url - if "torrentin" in torrent_options[selection][0]: - item.url = 'file://' + item.url - - if not url and item.torrent_alt: # Si hay error, se busca un .torrent alternativo - if (item.torrent_alt.startswith("\\") or item.torrent_alt.startswith("/")) and videolibrary_path: - item.url = item.torrent_alt # El .torrent alternativo puede estar en una url o en local - elif not item.url.startswith("\\") and not item.url.startswith("/"): - item.url = item.torrent_alt - - # Si es un archivo .torrent local, actualizamos el path relativo a path absoluto - if (item.url.startswith("\\") or item.url.startswith("/")) and not url_stat and videolibrary_path: # .torrent alternativo local - movies = config.get_setting("folder_movies") - series = config.get_setting("folder_tvshows") - if item.contentType == 'movie': - folder = movies # películas - else: - folder = series # o series - item.url = filetools.join(config.get_videolibrary_path(), folder, item.url) # dirección del .torrent local en la Videoteca - if filetools.copy(item.url, torrents_path, silent=True): # se copia a la carpeta generíca para evitar problemas de encode - item.url = torrents_path - if "torrentin" in torrent_options[selection][0]: # Si es Torrentin, hay que añadir un prefijo - item.url = 'file://' + item.url - size, rar_files = generictools.get_torrent_size('', file_list=True, local_torr=torrents_path,short_pad=True) - - mediaurl = item.url - - if selection >= 0: + xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, xlistitem) + time.sleep(1) mediaurl = urllib.quote_plus(item.url) - # Llamada con más parámetros para completar el título - if ("quasar" in torrent_options[selection][1] or "elementum" in torrent_options[selection][1]) and item.infoLabels['tmdb_id']: - if 
item.contentType == 'episode' and "elementum" not in torrent_options[selection][1]: - mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % ( - item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], - item.infoLabels['tmdb_id']) + torr_client = torrent_options[selection][0] + + if torr_client in ['elementum'] and item.infoLabels['tmdb_id']: + if item.contentType == 'episode' and "elementum" not in torr_client: + mediaurl += "&episode=%s&library=&season=%s&show=%s&tmdb=%s&type=episode" % (item.infoLabels['episode'], item.infoLabels['season'], item.infoLabels['tmdb_id'], item.infoLabels['tmdb_id']) elif item.contentType == 'movie': mediaurl += "&library=&tmdb=%s&type=movie" % (item.infoLabels['tmdb_id']) - xbmc.executebuiltin("PlayMedia(" + torrent_options[selection][1] % mediaurl + ")") + if torr_client in ['elementum'] and item.downloadFilename: + torrent.elementum_download(item) + else: + time.sleep(3) + xbmc.executebuiltin("PlayMedia(" + torrent_options[selection][1] % mediaurl + ")") - # Si es un archivo RAR, monitorizamos el cliente Torrent hasta que haya descargado el archivo, - # y después lo extraemos, incluso con RAR's anidados y con contraseña - torr_client = torrent_options[selection][0].capitalize() - if 'RAR-' in size and torr_client in ['quasar', 'elementum'] and UNRAR: - rar_file, save_path_videos, folder_torr = torrent.wait_for_download(item, mediaurl, rar_files, torr_client) # Esperamos mientras se descarga el RAR - if rar_file and save_path_videos: # Si se ha descargado el RAR... - dp = dialog_progress_bg('KoD %s' % torr_client) - video_file, rar, video_path, erase_file_path = torrent.extract_files(rar_file, save_path_videos, password, dp, item, torr_client) # ... 
extraemos el vídeo del RAR - dp.close() + torrent.mark_auto_as_watched(item) - # Reproducimos el vídeo extraido, si no hay nada en reproducción - while is_playing() and rar and not xbmc.abortRequested: - time.sleep(3) # Repetimos cada intervalo - if rar and not xbmc.abortRequested: - time.sleep(1) - video_play = filetools.join(video_path, video_file) - log("##### video_play: %s" % video_play) - playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) - playlist.clear() - playlist.add(video_play, xlistitem) - xbmc_player.play(playlist) - - # selectionamos que clientes torrent soportamos para el marcado de vídeos vistos: asumimos que todos funcionan - torrent.mark_auto_as_watched(item) - - # Si se ha extraido un RAR, se pregunta para borrar los archivos después de reproducir el vídeo (plugins externos) - while is_playing() and rar and not xbmc.abortRequested: - time.sleep(3) # Repetimos cada intervalo - if rar and not xbmc.abortRequested: - if dialog_yesno('KoD %s' % torr_client, config.get_localized_string(30031)): - log("##### erase_file_path: %s" % erase_file_path) - try: - torr_data, deamon_url, index = torrent.get_tclient_data(folder_torr, torr_client) - if torr_data and deamon_url: - data = httptools.downloadpage('%sdelete/%s' % (deamon_url, index), timeout=5, alfa_s=True).data - time.sleep(1) - if filetools.isdir(erase_file_path): - filetools.rmdirtree(erase_file_path) - elif filetools.exists(erase_file_path) and filetools.isfile(erase_file_path): - filetools.remove(erase_file_path) - except: - logger.error(traceback.format_exc(1)) - elementum_dl = config.get_setting("elementum_dl", server="torrent", default='') # Si salvamos el cambio de Elementum - if elementum_dl: - config.set_setting("elementum_dl", "", server="torrent") # lo reseteamos en Alfa - xbmcaddon.Addon(id="plugin.video.%s" % torrent_options[selection][0].replace('Plugin externo: ', '')) \ - .setSetting('download_storage', elementum_dl) # y lo reseteamos en Elementum + while is_playing() and not 
xbmc.abortRequested: + time.sleep(3) def log(texto): - xbmc.log(texto, xbmc.LOGNOTICE) - + xbmc.log(texto, xbmc.LOGNOTICE) \ No newline at end of file diff --git a/platformcode/updater.py b/platformcode/updater.py index cf51243e..b12cad12 100644 --- a/platformcode/updater.py +++ b/platformcode/updater.py @@ -283,8 +283,8 @@ def getShaStr(str): -def updateFromZip(message='Installazione in corso...'): - dp = platformtools.dialog_progress_bg('Kodi on Demand', message) +def updateFromZip(message=config.get_localized_string(80050)): + dp = platformtools.dialog_progress_bg(config.get_localized_string(20000), message) dp.update(0) remotefilename = 'https://github.com/' + user + "/" + repo + "/archive/" + branch + ".zip" @@ -304,8 +304,7 @@ def updateFromZip(message='Installazione in corso...'): urllib.urlretrieve(remotefilename, localfilename, lambda nb, bs, fs, url=remotefilename: _pbhook(nb, bs, fs, url, dp)) except Exception as e: - platformtools.dialog_ok('Kodi on Demand', 'Non riesco a scaricare il file d\'installazione da github, questo è probabilmente dovuto ad una mancanza di connessione (o qualcosa impedisce di raggiungere github).\n' - 'Controlla bene e quando hai risolto riapri KoD.') + platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(80031)) logger.info('Non sono riuscito a scaricare il file zip') logger.info(e) dp.close() @@ -318,6 +317,8 @@ def updateFromZip(message='Installazione in corso...'): if os.path.isfile(localfilename): logger.info('il file esiste') + dp.update(80, config.get_localized_string(20000), config.get_localized_string(80032)) + import zipfile try: hash = fixZipGetHash(localfilename) @@ -329,7 +330,7 @@ def updateFromZip(message='Installazione in corso...'): for member in zip.infolist(): zip.extract(member, destpathname) cur_size += member.file_size - dp.update(int(80 + cur_size * 19 / size)) + dp.update(int(80 + cur_size * 15 / size)) except Exception as e: logger.info('Non sono riuscito ad estrarre 
il file zip') @@ -341,7 +342,7 @@ def updateFromZip(message='Installazione in corso...'): return False - dp.update(99) + dp.update(95) # puliamo tutto global addonDir @@ -356,8 +357,9 @@ def updateFromZip(message='Installazione in corso...'): remove(localfilename) dp.update(100) + xbmc.sleep(1000) dp.close() - if message != 'Installazione in corso...': + if message != config.get_localized_string(80050): xbmc.executebuiltin("UpdateLocalAddons") refreshLang() @@ -448,9 +450,9 @@ def fOpen(file, mode = 'r'): def _pbhook(numblocks, blocksize, filesize, url, dp): try: - percent = min((numblocks*blocksize*90)/filesize, 100) + percent = min((numblocks*blocksize*80)/filesize, 80) dp.update(int(percent)) except Exception as e: logger.error(e) - percent = 90 + percent = 80 dp.update(percent) diff --git a/platformcode/xbmc_config_menu.py b/platformcode/xbmc_config_menu.py index 290622c4..bfdd8ced 100644 --- a/platformcode/xbmc_config_menu.py +++ b/platformcode/xbmc_config_menu.py @@ -4,39 +4,34 @@ # ------------------------------------------------------------ from __future__ import division -#from builtins import str -import sys +import sys, os, inspect, xbmcgui, xbmc PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int from builtins import range from past.utils import old_div -import inspect -import os - -import xbmcgui - -from core import channeltools -from core import servertools, scrapertools -from platformcode import config, logger +from core import channeltools, servertools, scrapertools +from platformcode import config, logger, platformtools +from core.support import log, dbg, match class SettingsWindow(xbmcgui.WindowXMLDialog): - """ Clase derivada que permite utilizar cuadros de configuracion personalizados. + """ + Derived class that allows you to use custom configuration boxes. 
- Esta clase deriva de xbmcgui.WindowXMLDialog y permite crear un cuadro de dialogo con controles del tipo: - Radio Button (bool), Cuadro de texto (text), Lista (list) y Etiquetas informativas (label). - Tambien podemos personalizar el cuadro añadiendole un titulo (title). + This class is derived from xbmcgui.WindowXMLDialog and allows you to create a dialog box with controls of the type: + Radio Button (bool), Text Box (text), List (list) and Information Labels (label). + We can also customize the box by adding a title (title). - Metodo constructor: - SettingWindow(listado_controles, dict_values, title, callback, item) + Construction method: + SettingWindow(list_controls, dict_values, title, callback, item) Parametros: - listado_controles: (list) Lista de controles a incluir en la ventana, segun el siguiente esquema: + list_controls: (list) Lista de controles a incluir en la ventana, segun el siguiente esquema: (opcional)list_controls= [ {'id': "nameControl1", 'type': "bool", # bool, text, list, label 'label': "Control 1: tipo RadioButton", - 'color': '0xFFee66CC', # color del texto en formato ARGB hexadecimal + 'color': '0xFFee66CC', # text color in hexadecimal ARGB format 'default': True, 'enabled': True, 'visible': True @@ -46,8 +41,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): 'label': "Control 2: tipo Cuadro de texto", 'color': '0xFFee66CC', 'default': "Valor por defecto", - 'hidden': False, # only for type = text Indica si hay que ocultar - el texto (para passwords) + 'hidden': False, # only for type = text Indicates whether to hide the text (for passwords) 'enabled': True, 'visible': True }, @@ -55,7 +49,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): 'type': "list", # bool, text, list, label 'label': "Control 3: tipo Lista", 'color': '0xFFee66CC', - 'default': 0, # Indice del valor por defecto en lvalues + 'default': 0, # Default value index in lvalues 'enabled': True, 'visible': True, 'lvalues':["item1", "item2", "item3", "item4"], # only for 
type = list @@ -67,10 +61,9 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): 'enabled': True, 'visible': True }] - Si no se incluye el listado_controles, se intenta obtener del json del canal desde donde se hace la - llamada. + If the controls list is not included, an attempt is made to obtain the json of the channel from which the call is made. - El formato de los controles en el json es: + The format of the controls in the json is: { ... ... @@ -120,49 +113,40 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): ] } - Los campos 'label', 'default', 'enabled' y 'lvalues' pueden ser un numero precedido de '@'. En cuyo caso se - buscara el literal en el archivo string.xml del idioma seleccionado. - Los campos 'enabled' y 'visible' admiten los comparadores eq(), gt() e it() y su funcionamiento se - describe en: http://kodi.wiki/view/Add-on_settings#Different_types + The fields 'label', 'default', 'enabled' and 'lvalues' can be a number preceded by '@'. In which case + it will look for the literal in the string.xml file of the selected language. + The 'enabled' and 'visible' fields support the comparators eq (), gt () and it () and their operation is + described at: http://kodi.wiki/view/Add-on_settings#Different_types - (opcional)dict_values: (dict) Diccionario que representa el par (id: valor) de los controles de la - lista. - Si algun control de la lista no esta incluido en este diccionario se le asignara el valor por - defecto. + (opcional) dict_values: (dict) Dictionary representing the pair (id: value) of the controls in the list. + If any control in the list is not included in this dictionary, it will be assigned the default value. dict_values={"nameControl1": False, "nameControl2": "Esto es un ejemplo"} - (opcional) caption: (str) Titulo de la ventana de configuracion. 
Se puede localizar mediante un numero - precedido de '@' - (opcional) callback (str) Nombre de la funcion, del canal desde el que se realiza la llamada, que sera - invocada al pulsar el boton aceptar de la ventana. A esta funcion se le pasara como parametros el - objeto 'item' y el dicionario 'dict_values'. Si este parametro no existe, se busca en el canal una - funcion llamada 'cb_validate_config' y si existe se utiliza como callback. + (opcional) caption: (str) Configuration window title. It can be located by a number preceded by '@' + (opcional) callback (str) Name of the function, of the channel from which the call is made, which will be + invoked when pressing the accept button in the window. This function will be passed as parameters the + object 'item' and the 'dict_values' dictionary. If this parameter does not exist, the channel is searched for + function called 'cb_validate_config' and if it exists it is used as a callback. - Retorno: Si se especifica 'callback' o el canal incluye 'cb_validate_config' se devolvera lo que devuelva - esa funcion. Si no devolvera None + Retorno: If 'callback' is specified or the channel includes 'cb_validate_config' what that function returns will be returned. If not return none - Ejemplos de uso: - platformtools.show_channel_settings(): Así tal cual, sin pasar ningún argumento, la ventana detecta de que canal - se ha hecho la llamada, - y lee los ajustes del json y carga los controles, cuando le das a Aceptar los vuelve a guardar. + Usage examples: + platformtools.show_channel_settings(): As such, without passing any argument, the window detects which channel the call has been made, + and read the json settings and load the controls, when you click OK, save them again. 
- return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, callback='cb', - item=item): - Así abre la ventana con los controles pasados y los valores de dict_values, si no se pasa dict_values, carga - los valores por defecto de los controles, - cuando le das a aceptar, llama a la función 'callback' del canal desde donde se ha llamado, pasando como - parámetros, el item y el dict_values + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, callback='cb', item=item): + This opens the window with the passed controls and the dict_values values, if dict_values is not passed, it loads the default values of the controls, + when you accept, it calls the 'callback' function of the channel from where it was called, passing as parameters, item and dict_values """ - def start(self, list_controls=None, dict_values=None, caption="", callback=None, item=None, - custom_button=None, channelpath=None): - logger.info() + def start(self, list_controls=None, dict_values=None, caption="", callback=None, item=None, custom_button=None, channelpath=None): + log() - # Ruta para las imagenes de la ventana + # Media Path self.mediapath = os.path.join(config.get_runtime_path(), 'resources', 'skins', 'Default', 'media') - # Capturamos los parametros + # Params self.list_controls = list_controls self.values = dict_values self.caption = caption @@ -178,45 +162,48 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): else: self.custom_button = None - # Obtenemos el canal desde donde se ha echo la llamada y cargamos los settings disponibles para ese canal + # Load Channel Settings if not channelpath: channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename self.channel = os.path.basename(channelpath).replace(".py", "") self.ch_type = os.path.basename(os.path.dirname(channelpath)) - logger.info('PATH= ' + channelpath) - # Si no tenemos list_controls, hay que sacarlos del json del canal + # If list_controls 
does not exist, it is removed from the channel json if not self.list_controls: - # Si la ruta del canal esta en la carpeta "channels", obtenemos los controles y valores mediante chaneltools - if os.path.join(config.get_runtime_path(), "channels") or os.path.join(config.get_runtime_path(), "specials") in channelpath: + # If the channel path is in the "channels" folder, we get the controls and values using chaneltools + if os.path.join(config.get_runtime_path(), "channels") or os.path.join(config.get_runtime_path(), "specials") in channelpath: - # La llamada se hace desde un canal + # The call is made from a channel self.list_controls, default_values = channeltools.get_channel_controls_settings(self.channel) self.kwargs = {"channel": self.channel} + self.channelName = channeltools.get_channel_json(self.channel)['name'] - # Si la ruta del canal esta en la carpeta "servers", obtenemos los controles y valores mediante servertools + # If the channel path is in the "servers" folder, we get the controls and values through servertools elif os.path.join(config.get_runtime_path(), "servers") in channelpath: - # La llamada se hace desde un canal + # The call is made from a channel self.list_controls, default_values = servertools.get_server_controls_settings(self.channel) self.kwargs = {"server": self.channel} + self.channelName = servertools.get_server_json(self.channel)['name'] - # En caso contrario salimos + # Else Exit else: return None - # Si no se pasan dict_values, creamos un dict en blanco + # If dict_values are not passed, create a blank dict if self.values is None: self.values = {} - # Ponemos el titulo + # Make title if self.caption == "": - self.caption = str(config.get_localized_string(30100)) + " -- " + self.channel.capitalize() + self.caption = str(config.get_localized_string(30100)) + ' - ' + self.channelName - elif self.caption.startswith('@') and unicode(self.caption[1:]).isnumeric(): - self.caption = config.get_localized_string(int(self.caption[1:])) + 
matches = match(self.caption, patron=r'@(\d+)').matches + if matches: + for m in matches: + self.caption = self.caption.replace('@' + match, config.get_localized_string(int(m))) - # Muestra la ventana + # Show Window self.return_value = None self.doModal() return self.return_value @@ -225,8 +212,6 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): def set_enabled(c, val): if c["type"] == "list": c["control"].setEnabled(val) - c["downBtn"].setEnabled(val) - c["upBtn"].setEnabled(val) c["label"].setEnabled(val) else: c["control"].setEnabled(val) @@ -235,8 +220,6 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): def set_visible(c, val): if c["type"] == "list": c["control"].setVisible(val) - c["downBtn"].setVisible(val) - c["upBtn"].setVisible(val) c["label"].setVisible(val) else: c["control"].setVisible(val) @@ -255,41 +238,37 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): ok = False - # Si la condicion es True o False, no hay mas que evaluar, ese es el valor + # If the condition is True or False, there is nothing else to evaluate, that is the value if isinstance(cond, bool): return cond - # Obtenemos las condiciones - # conditions = re.compile("(!?eq|!?gt|!?lt)?\(([^,]+),[\"|']?([^)|'|\"]*)['|\"]?\)[ ]*([+||])?").findall(cond) + # Get the conditions conditions = re.compile(r'''(!?eq|!?gt|!?lt)?\s*\(\s*([^, ]+)\s*,\s*["']?([^"'\)]+)["']?\)([+|])?''').findall(cond) - # conditions = scrapertools.find_multiple_matches(cond, r"(!?eq|!?gt|!?lt)?\(([^,]+),[\"|']?([^)|'|\"]*)['|\"]?\)[ ]*([+||])?") for operator, id, value, next in conditions: - # El id tiene que ser un numero, sino, no es valido y devuelve False + # The id must be a number, otherwise it is not valid and returns False try: id = int(id) except: return False - # El control sobre el que evaluar, tiene que estar dentro del rango, sino devuelve False + # The control to evaluate on has to be within range, otherwise it returns False if index + id < 0 or index + id >= len(self.list_controls): return False else: - # 
Obtenemos el valor del control sobre el que se compara + # Obtain the value of the control on which it is compared c = self.list_controls[index + id] - if c["type"] == "bool": - control_value = bool(c["control"].isSelected()) - if c["type"] == "text": - control_value = c["control"].getText() - if c["type"] == "list": - control_value = c["label"].getLabel() - if c["type"] == "label": - control_value = c["control"].getLabel() + if c["type"] == "bool": control_value = bool(c["control"].isSelected()) + if c["type"] == "text": control_value = c["control"].getText() + if c["type"] == "list": control_value = c["label"].getLabel() + if c["type"] == "label": control_value = c["control"].getLabel() - if value.startswith('@') and unicode(value[1:]).isnumeric(): - value = config.get_localized_string(int(value[1:])) + matches = match(self.caption, patron=r'@(\d+)').matches + if matches: + for m in matches: + self.caption = self.caption.replace('@' + match, config.get_localized_string(int(m))) - # Operaciones lt "menor que" y gt "mayor que", requieren que las comparaciones sean numeros, sino devuelve + # Operations lt "less than" and gt "greater than" require comparisons to be numbers, otherwise it returns # False if operator in ["lt", "!lt", "gt", "!gt"]: try: @@ -297,127 +276,106 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): except ValueError: return False - # Operacion eq "igual a" + # Operation eq "equal to" if operator in ["eq", "!eq"]: - # valor int + # int try: value = int(value) except ValueError: pass - # valor bool + # bool if not isinstance(value, int) and value.lower() == "true": value = True elif not isinstance(value, int) and value.lower() == "false": value = False - # operacion "eq" "igual a" + # Operation eq "equal to" if operator == "eq": if control_value == value: ok = True else: ok = False - # operacion "!eq" "no igual a" + # Operation !eq "not equal to" if operator == "!eq": if not control_value == value: ok = True else: ok = False - # operacion "gt" 
"mayor que" + # operation "gt" "greater than" if operator == "gt": if control_value > value: ok = True else: ok = False - # operacion "!gt" "no mayor que" + # operation "!gt" "not greater than" if operator == "!gt": if not control_value > value: ok = True else: ok = False - # operacion "lt" "menor que" + # operation "lt" "less than" if operator == "lt": if control_value < value: ok = True else: ok = False - # operacion "!lt" "no menor que" + # operation "!lt" "not less than" if operator == "!lt": if not control_value < value: ok = True else: ok = False - # Siguiente operación, si es "|" (or) y el resultado es True, no tiene sentido seguir, es True + # Next operation, if it is "|" (or) and the result is True, there is no sense to follow, it is True if next == "|" and ok is True: break - # Siguiente operación, si es "+" (and) y el resultado es False, no tiene sentido seguir, es False + # Next operation, if it is "+" (and) and the result is False, there is no sense to follow, it is False if next == "+" and ok is False: break - # Siguiente operación, si es "+" (and) y el resultado es True, Seguira, para comprobar el siguiente valor - # Siguiente operación, si es "|" (or) y el resultado es False, Seguira, para comprobar el siguiente valor - return ok + def add_control_label(self, c): - control = xbmcgui.ControlLabel(0, -100, self.controls_width, 40, "", alignment=4, font=self.font, - textColor=c["color"]) + control = xbmcgui.ControlLabel(0, -100, self.controls_width + 20, 40, "", alignment=4, font=self.font, textColor=c["color"]) self.addControl(control) control.setVisible(False) control.setLabel(c["label"]) - - # Lo añadimos al listado c["control"] = control + def add_control_list(self, c): - control = xbmcgui.ControlButton(0, -100, self.controls_width + 20, self.height_control, - c["label"], os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), + control = xbmcgui.ControlButton(0, -100, self.controls_width + 10, self.height_control, c["label"], + 
os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), - 10, textColor=c["color"], - font=self.font) - - label = xbmcgui.ControlLabel(0, -100, self.controls_width - 80, self.height_control, - "", font=self.font, textColor=c["color"], alignment= 1 | 4) - - upBtn = xbmcgui.ControlButton(0, -100, 15, 7, "", - focusTexture=os.path.join(self.mediapath, 'Controls', 'spinUp-Focus.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', 'spinUp-noFocus.png')) - - downBtn = xbmcgui.ControlButton(0, -100 + 15, 15, 7, "", - focusTexture=os.path.join(self.mediapath, 'Controls', 'spinDown-Focus.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', 'spinDown-noFocus.png')) + 10, textColor=c["color"], font=self.font) + label = xbmcgui.ControlLabel(0, -100, self.controls_width, self.height_control, "", font=self.font, textColor=c["color"], alignment= 1 | 4) self.addControl(control) self.addControl(label) - self.addControl(upBtn) - self.addControl(downBtn) control.setVisible(False) label.setVisible(False) - upBtn.setVisible(False) - downBtn.setVisible(False) label.setLabel(c["lvalues"][self.values[c["id"]]]) c["control"] = control c["label"] = label - c["downBtn"] = downBtn - c["upBtn"] = upBtn + def add_control_text(self, c): if xbmcgui.ControlEdit == ControlEdit: - control = xbmcgui.ControlEdit(0, -100, self.controls_width, self.height_control, - c["label"], os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), - os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), - 0, textColor=c["color"], - font=self.font, isPassword=c["hidden"], window=self) + control = xbmcgui.ControlEdit(0, -100, self.controls_width, self.height_control, c["label"], + os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), + os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), 0, + textColor=c["color"], font=self.font, isPassword=c["hidden"], window=self) else: control = xbmcgui.ControlEdit(0, -100, 
self.controls_width - 5, self.height_control, @@ -441,36 +399,27 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): c["control"] = control def add_control_bool(self, c): - # Versiones antiguas no admite algunas texturas + # Old versions do not support some textures if xbmcgui.__version__ in ["1.2", "2.0"]: control = xbmcgui.ControlRadioButton(0, -100, self.controls_width + 20, self.height_control, label=c["label"], font=self.font, textColor=c["color"], - focusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemNF.png')) + focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png')) else: control = xbmcgui.ControlRadioButton(0, -100, self.controls_width + 20, self.height_control, label=c["label"], font=self.font, textColor=c["color"], - focusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemFO.png'), - noFocusTexture=os.path.join(self.mediapath, 'Controls', - 'MenuItemNF.png'), - focusOnTexture=os.path.join(self.mediapath, 'Controls', - 'radiobutton-focus.png'), - noFocusOnTexture=os.path.join(self.mediapath, 'Controls', - 'radiobutton-focus.png'), - focusOffTexture=os.path.join(self.mediapath, 'Controls', - 'radiobutton-nofocus.png'), - noFocusOffTexture=os.path.join(self.mediapath, 'Controls', - 'radiobutton-nofocus.png')) + focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), + focusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), + noFocusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), + focusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png'), + noFocusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png')) self.addControl(control) control.setVisible(False) - 
control.setRadioDimension(x=self.controls_width + 10 - (self.height_control - 5), y=0, - width=self.height_control - 5, height=self.height_control - 5) + control.setRadioDimension(x=self.controls_width - (self.height_control - 5), y=0, width=self.height_control - 5, height=self.height_control - 5) control.setSelected(self.values[c["id"]]) c["control"] = control @@ -482,14 +431,14 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.ok_enabled = False self.default_enabled = False - #### Compatibilidad con Kodi 18 #### + # Kodi 18 compatibility if config.get_platform(True)['num_version'] < 18: if xbmcgui.__version__ == "1.2": self.setCoordinateResolution(1) else: self.setCoordinateResolution(5) - # Ponemos el título + # Title self.getControl(10002).setLabel(self.caption) if self.custom_button is not None: @@ -497,95 +446,66 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.getControl(10006).setLabel(self.custom_button['label']) else: self.getControl(10006).setVisible(False) - self.getControl(10004).setPosition(self.getControl(10004).getPosition()[0] + 80, - self.getControl(10004).getPosition()[1]) - self.getControl(10005).setPosition(self.getControl(10005).getPosition()[0] + 80, - self.getControl(10005).getPosition()[1]) + self.getControl(10004).setPosition(self.getControl(10004).getPosition()[0] + 80, self.getControl(10004).getPosition()[1]) + self.getControl(10005).setPosition(self.getControl(10005).getPosition()[0] + 80, self.getControl(10005).getPosition()[1]) - # Obtenemos las dimensiones del area de controles + # Control Area Dimensions self.controls_width = self.getControl(10007).getWidth() - 30 self.controls_height = self.getControl(10007).getHeight() -100 self.controls_pos_x = self.getControl(10007).getPosition()[0] + self.getControl(10001).getPosition()[0] + 10 self.controls_pos_y = self.getControl(10007).getPosition()[1] + self.getControl(10001).getPosition()[1] - self.height_control = 40 - self.font = "font12" + self.height_control = 60 + self.font 
= "font16" - # En versiones antiguas: creamos 5 controles, de lo conrtario al hacer click al segundo control, - # automaticamente cambia el label del tercero a "Short By: Name" no se porque... + # In old versions: we create 5 controls, from the contrary when clicking the second control, + # automatically change third party label to "Short By: Name" I don't know why ... if xbmcgui.ControlEdit == ControlEdit: for x in range(5): control = xbmcgui.ControlRadioButton(-500, 0, 0, 0, "") self.addControl(control) for c in self.list_controls: - # Saltamos controles que no tengan los valores adecuados - if "type" not in c: - continue - if "label" not in c: - continue - if c["type"] != "label" and "id" not in c: - continue - if c["type"] == "list" and "lvalues" not in c: - continue - if c["type"] == "list" and not isinstance(c["lvalues"], list): - continue - if c["type"] == "list" and not len(c["lvalues"]) > 0: - continue - if c["type"] != "label" and len( - [control.get("id") for control in self.list_controls if c["id"] == control.get("id")]) > 1: - continue + # Skip controls that do not have the appropriate values + if "type" not in c: continue + if "label" not in c: continue + if c["type"] != "label" and "id" not in c: continue + if c["type"] == "list" and "lvalues" not in c: continue + if c["type"] == "list" and not isinstance(c["lvalues"], list): continue + if c["type"] == "list" and not len(c["lvalues"]) > 0: continue + if c["type"] != "label" and len([control.get("id") for control in self.list_controls if c["id"] == control.get("id")]) > 1: continue - # Translation label y lvalues - if c['label'].startswith('@') and unicode(c['label'][1:]).isnumeric(): - c['label'] = config.get_localized_string(int(c['label'][1:])) + # Translation label and lvalues + if c['label'].startswith('@') and unicode(c['label'][1:]).isnumeric(): c['label'] = config.get_localized_string(int(c['label'][1:])) if c['type'] == 'list': lvalues = [] for li in c['lvalues']: - if li.startswith('@') and 
unicode(li[1:]).isnumeric(): - lvalues.append(config.get_localized_string(int(li[1:]))) - else: - lvalues.append(li) + if li.startswith('@') and unicode(li[1:]).isnumeric(): lvalues.append(config.get_localized_string(int(li[1:]))) + else: lvalues.append(li) c['lvalues'] = lvalues - # Valores por defecto en caso de que el control no disponga de ellos - if c["type"] == "bool": - default = False - elif c["type"] == "list": - default = 0 - else: - # label or text - default = "" + # Default values in case the control does not have them + if c["type"] == "bool": default = False + elif c["type"] == "list": default = 0 + else: default = "" # label or text c["default"] = c.get("default", default) c["color"] = c.get("color", "0xFFFFFFFF") c["visible"] = c.get("visible", True) c["enabled"] = c.get("enabled", True) - if c["type"] == "label" and "id" not in c: - c["id"] = None + if c["type"] == "label" and "id" not in c: c["id"] = None + if c["type"] == "text": c["hidden"] = c.get("hidden", False) - if c["type"] == "text": - c["hidden"] = c.get("hidden", False) - - # Decidimos si usar el valor por defecto o el valor guardado + # Decide whether to use the default value or the saved value if c["type"] in ["bool", "text", "list"]: if c["id"] not in self.values: - if not self.callback: - self.values[c["id"]] = config.get_setting(c["id"], **self.kwargs) - else: - self.values[c["id"]] = c["default"] + if not self.callback: self.values[c["id"]] = config.get_setting(c["id"], **self.kwargs) + else: self.values[c["id"]] = c["default"] - if c["type"] == "bool": - self.add_control_bool(c) - - elif c["type"] == 'text': - self.add_control_text(c) - - elif c["type"] == 'list': - self.add_control_list(c) - - elif c["type"] == 'label': - self.add_control_label(c) + if c["type"] == "bool": self.add_control_bool(c) + elif c["type"] == 'text': self.add_control_text(c) + elif c["type"] == 'list': self.add_control_list(c) + elif c["type"] == 'label': self.add_control_label(c) self.list_controls = [c 
for c in self.list_controls if "control" in c] @@ -608,16 +528,11 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): if focus: if not index >= self.index or not index <= self.index + show_controls: - if index < self.index: - new_index = index - else: - new_index = index - show_controls - else: - new_index = self.index + if index < self.index: new_index = index + else: new_index = index - show_controls + else:new_index = self.index else: - - if index + show_controls >= len(self.visible_controls): index = len( - self.visible_controls) - show_controls - 1 + if index + show_controls >= len(self.visible_controls): index = len(self.visible_controls) - show_controls - 1 if index < 0: index = 0 new_index = index @@ -630,23 +545,17 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): visible_count += 1 if c["type"] != "list": - if c["type"] == "bool": - c["control"].setPosition(self.controls_pos_x, c["y"]) - else: - c["control"].setPosition(self.controls_pos_x, c["y"]) + if c["type"] == "bool": c["control"].setPosition(self.controls_pos_x, c["y"]) + else: c["control"].setPosition(self.controls_pos_x, c["y"]) else: c["control"].setPosition(self.controls_pos_x, c["y"]) - if xbmcgui.__version__ == "1.2": - c["label"].setPosition(self.controls_pos_x + self.controls_width - 30, c["y"]) - else: - c["label"].setPosition(self.controls_pos_x, c["y"]) - c["upBtn"].setPosition(self.controls_pos_x + c["control"].getWidth() - 40, c["y"] + 15) - c["downBtn"].setPosition(self.controls_pos_x + c["control"].getWidth() - 25, c["y"] + 15) + if xbmcgui.__version__ == "1.2": c["label"].setPosition(self.controls_pos_x + self.controls_width - 30, c["y"]) + else: c["label"].setPosition(self.controls_pos_x, c["y"]) self.set_visible(c, True) - # Calculamos la posicion y tamaño del ScrollBar + # Calculate the position and size of the ScrollBar hidden_controls = len(self.visible_controls) - show_controls - 1 if hidden_controls < 0: hidden_controls = 0 @@ -687,34 +596,26 @@ class 
SettingsWindow(xbmcgui.WindowXMLDialog): self.default_enabled = True def onClick(self, id): - # Valores por defecto + # Default values if id == 10006: if self.custom_button is not None: if self.custom_button["close"]: self.close() - if '.' in self.callback: - package, self.callback = self.callback.rsplit('.', 1) - else: - package = '%s.%s' % (self.ch_type, self.channel) + if '.' in self.callback: package, self.callback = self.callback.rsplit('.', 1) + else: package = '%s.%s' % (self.ch_type, self.channel) - try: - cb_channel = __import__(package, None, None, [package]) - except ImportError: - logger.error('Imposible importar %s' % package) + try: cb_channel = __import__(package, None, None, [package]) + except ImportError: logger.error('Imposible importar %s' % package) else: self.return_value = getattr(cb_channel, self.custom_button['function'])(self.item, self.values) if not self.custom_button["close"]: - if isinstance(self.return_value, dict) and "label" in self.return_value: - self.getControl(10006).setLabel(self.return_value['label']) + if isinstance(self.return_value, dict) and "label" in self.return_value: self.getControl(10006).setLabel(self.return_value['label']) for c in self.list_controls: - if c["type"] == "text": - c["control"].setText(self.values[c["id"]]) - if c["type"] == "bool": - c["control"].setSelected(self.values[c["id"]]) - if c["type"] == "list": - c["label"].setLabel(c["lvalues"][self.values[c["id"]]]) + if c["type"] == "text": c["control"].setText(self.values[c["id"]]) + if c["type"] == "bool": c["control"].setSelected(self.values[c["id"]]) + if c["type"] == "list": c["label"].setLabel(c["lvalues"][self.values[c["id"]]]) self.evaluate_conditions() self.dispose_controls(self.index, force=True) @@ -736,78 +637,57 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.check_default() self.check_ok() - # Boton Cancelar y [X] + # Cancel button [X] if id == 10003 or id == 10005: self.close() - # Boton Aceptar + # OK button if id == 10004: 
self.close() - if self.callback and '.' in self.callback: - package, self.callback = self.callback.rsplit('.', 1) - else: - package = '%s.%s' % (self.ch_type, self.channel) + if self.callback and '.' in self.callback: package, self.callback = self.callback.rsplit('.', 1) + else: package = '%s.%s' % (self.ch_type, self.channel) cb_channel = None - try: - cb_channel = __import__(package, None, None, [package]) - except ImportError: - logger.error('Imposible importar %s' % package) + + try: cb_channel = __import__(package, None, None, [package]) + except ImportError:logger.error('Impossible to import %s' % package) if self.callback: - # Si existe una funcion callback la invocamos ... + # If there is a callback function we invoke it... self.return_value = getattr(cb_channel, self.callback)(self.item, self.values) else: - # si no, probamos si en el canal existe una funcion 'cb_validate_config' ... + # if not, we test if there is a 'cb_validate_config' function in the channel... try: self.return_value = getattr(cb_channel, 'cb_validate_config')(self.item, self.values) except AttributeError: - # ... si tampoco existe 'cb_validate_config'... + # if 'cb_validate_config' doesn't exist either ... 
for v in self.values: config.set_setting(v, self.values[v], **self.kwargs) - # Controles de ajustes, si se cambia el valor de un ajuste, cambiamos el valor guardado en el diccionario de - # valores - # Obtenemos el control sobre el que se ha echo click + # Adjustment controls, if the value of an adjustment is changed, we change the value saved in the value dictionary + # Get the control that has been clicked # control = self.getControl(id) - # Lo buscamos en el listado de controles + # We look it up in the list of controls for cont in self.list_controls: - # Si el control es un "downBtn" o "upBtn" son los botones del "list" - # en este caso cambiamos el valor del list - if cont["type"] == "list" and (cont["downBtn"].getId() == id or cont["upBtn"].getId() == id): + if cont['type'] == "list" and cont["control"].getId() == id: + select = platformtools.dialog_select(config.get_localized_string(30041), cont["lvalues"], self.values[cont["id"]]) + if select >= 0: + cont["label"].setLabel(cont["lvalues"][select]) + self.values[cont["id"]] = cont["lvalues"].index(cont["label"].getLabel()) - # Para bajar una posicion - if cont["downBtn"].getId() == id: - index = cont["lvalues"].index(cont["label"].getLabel()) - if index > 0: - cont["label"].setLabel(cont["lvalues"][index - 1]) + # If the control is a "bool", we save the new value True / False + if cont["type"] == "bool" and cont["control"].getId() == id: self.values[cont["id"]] = bool(cont["control"].isSelected()) - # Para subir una posicion - elif cont["upBtn"].getId() == id: - index = cont["lvalues"].index(cont["label"].getLabel()) - if index < len(cont["lvalues"]) - 1: - cont["label"].setLabel(cont["lvalues"][index + 1]) - - # Guardamos el nuevo valor en el diccionario de valores - self.values[cont["id"]] = cont["lvalues"].index(cont["label"].getLabel()) - - # Si esl control es un "bool", guardamos el nuevo valor True/False - if cont["type"] == "bool" and cont["control"].getId() == id: - self.values[cont["id"]] = 
bool(cont["control"].isSelected()) - - # Si esl control es un "text", guardamos el nuevo valor + # If the control is a "text", we save the new value if cont["type"] == "text" and cont["control"].getId() == id: - # Versiones antiguas requieren abrir el teclado manualmente + # Older versions require opening the keyboard manually if xbmcgui.ControlEdit == ControlEdit: - import xbmc - keyboard = xbmc.Keyboard(cont["control"].getText(), cont["control"].getLabel(), - cont["control"].isPassword) + keyboard = xbmc.Keyboard(cont["control"].getText(), cont["control"].getLabel(), cont["control"].isPassword) keyboard.setHiddenInput(cont["control"].isPassword) keyboard.doModal() - if keyboard.isConfirmed(): - cont["control"].setText(keyboard.getText()) + if keyboard.isConfirmed(): cont["control"].setText(keyboard.getText()) self.values[cont["id"]] = cont["control"].getText() @@ -816,70 +696,43 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.check_default() self.check_ok() - # Versiones antiguas requieren esta funcion + # Older versions require this feature def onFocus(self, a): pass def onAction(self, raw_action): - # Obtenemos el foco + # Get Focus focus = self.getFocusId() action = raw_action.getId() - # Accion 1: Flecha izquierda + # On Left if action == 1: - # Si el foco no está en ninguno de los tres botones inferiores, y esta en un "list" cambiamos el valor - if focus not in [10004, 10005, 10006]: - control = self.getFocus().getId() - for cont in self.list_controls: - if cont["type"] == "list" and cont["control"].getId() == control: - index = cont["lvalues"].index(cont["label"].getLabel()) - if index > 0: - cont["label"].setLabel(cont["lvalues"][index - 1]) + # if Focus is on close button + if focus == 10003: + self.dispose_controls(0, True) + self.setFocusId(3001) - # Guardamos el nuevo valor en el listado de controles - self.values[cont["id"]] = cont["lvalues"].index(cont["label"].getLabel()) - - self.evaluate_conditions() - self.dispose_controls(self.index, 
force=True) - self.check_default() - self.check_ok() - - # Si el foco está en alguno de los tres botones inferiores, movemos al siguiente + # if focus is on List else: - if focus == 10006: - self.setFocusId(10005) - if focus == 10005 and self.ok_enabled: + if self.ok_enabled: self.setFocusId(10004) - - # Accion 1: Flecha derecha - elif action == 2: - # Si el foco no está en ninguno de los tres botones inferiores, y esta en un "list" cambiamos el valor - if focus not in [10004, 10005, 10006]: - control = self.getFocus().getId() - for cont in self.list_controls: - if cont["type"] == "list" and cont["control"].getId() == control: - index = cont["lvalues"].index(cont["label"].getLabel()) - if index < len(cont["lvalues"]) - 1: - cont["label"].setLabel(cont["lvalues"][index + 1]) - - # Guardamos el nuevo valor en el listado de controles - self.values[cont["id"]] = cont["lvalues"].index(cont["label"].getLabel()) - - self.evaluate_conditions() - self.dispose_controls(self.index, force=True) - self.check_default() - self.check_ok() - - # Si el foco está en alguno de los tres botones inferiores, movemos al siguiente - else: - if focus == 10004: + else: self.setFocusId(10005) - if focus == 10005 and self.default_enabled: - self.setFocusId(10006) - # Accion 4: Flecha abajo + # On Right + elif action == 2: + # if Focus is on button + if focus in [10004, 10005, 10006]: + self.dispose_controls(0, True) + self.setFocusId(3001) + + # if focus is on List + else: + self.setFocusId(10003) + + # On Down elif action == 4: - # Si el foco no está en ninguno de los tres botones inferiores, bajamos el foco en los controles de ajustes + # if focus is on List if focus not in [10004, 10005, 10006]: try: focus_control = [self.visible_controls.index(c) for c in self.visible_controls if c["control"].getId() == self.getFocus().getId()][0] @@ -888,20 +741,28 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): except: focus_control = 0 - while not focus_control == len(self.visible_controls) and ( - 
self.visible_controls[focus_control]["type"] == "label" or not - self.visible_controls[focus_control]["active"]): + while not focus_control == len(self.visible_controls) and (self.visible_controls[focus_control]["type"] == "label" or not self.visible_controls[focus_control]["active"]): focus_control += 1 if focus_control >= len(self.visible_controls): - self.setFocusId(10005) - return + focus_control = 0 + self.setFocusId(3001) self.dispose_controls(focus_control, True) - # Accion 4: Flecha arriba + # Else navigate on main buttons + elif focus in [10004]: + self.setFocusId(10005) + elif focus in [10005]: + if self.default_enabled: self.setFocusId(10006) + elif self.ok_enabled: self.setFocusId(10004) + elif focus in [10006]: + if self.ok_enabled: self.setFocusId(10004) + else: self.setFocusId(10005) + + # On Up elif action == 3: - # Si el foco no está en ninguno de los tres botones inferiores, subimos el foco en los controles de ajustes + # if focus is on List if focus not in [10003, 10004, 10005, 10006]: try: focus_control = \ @@ -912,27 +773,31 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.visible_controls[focus_control]["active"]): focus_control -= 1 - if focus_control < 0: focus_control = 0 + if focus_control < 0: + focus_control = len(self.visible_controls) - 1 + except: focus_control = 0 self.dispose_controls(focus_control, True) - # Si el foco está en alguno de los tres botones inferiores, ponemos el foco en el ultimo ajuste. 
- else: - focus_control = len(self.visible_controls) - 1 - while not focus_control == -1 and (self.visible_controls[focus_control]["type"] == "label" or not - self.visible_controls[focus_control]["active"]): - focus_control -= 1 - if focus_control < 0: focus_control = 0 + # Else navigate on main buttons + elif focus in [10004]: + if self.default_enabled: self.setFocusId(10006) + else: self.setFocusId(10005) + elif focus in [10005]: + if self.ok_enabled: self.setFocusId(10004) + elif self.default_enabled: self.setFocusId(10006) + elif focus in [10006]: + self.setFocusId(10005) - self.setFocus(self.visible_controls[focus_control]["control"]) - # Accion 104: Scroll arriba + + # Accion 104: Scroll Down elif action == 104: self.dispose_controls(self.index - 1) - # Accion 105: Scroll abajo + # Accion 105: Scroll Up elif action == 105: self.dispose_controls(self.index + 1) @@ -941,6 +806,9 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): elif action in [10, 92]: self.close() + elif action == 501: + self.xx = int(raw_action.getAmount2()) + elif action == 504: if self.xx > raw_action.getAmount2(): @@ -953,8 +821,6 @@ class SettingsWindow(xbmcgui.WindowXMLDialog): self.dispose_controls(self.index - 1) return - elif action == 501: - self.xx = int(raw_action.getAmount2()) class ControlEdit(xbmcgui.ControlButton): diff --git a/platformcode/xbmc_info_window.py b/platformcode/xbmc_info_window.py index 07c689fc..1fb9c859 100644 --- a/platformcode/xbmc_info_window.py +++ b/platformcode/xbmc_info_window.py @@ -226,14 +226,12 @@ class InfoWindow(xbmcgui.WindowXMLDialog): self.getControl(100014).setLabel(config.get_localized_string(60384)) self.getControl(100015).setLabel(self.result.get("temporada_nombre", "N/A")) self.getControl(100016).setLabel(config.get_localized_string(60385)) - self.getControl(100017).setLabel(self.result.get("season", "N/A") + " de " + - self.result.get("seasons", "N/A")) + self.getControl(100017).setLabel(self.result.get("season", "N/A") + " de " + 
self.result.get("seasons", "N/A")) if self.result.get("episode"): self.getControl(100014).setLabel(config.get_localized_string(60377)) self.getControl(100015).setLabel(self.result.get("episode_title", "N/A")) self.getControl(100018).setLabel(config.get_localized_string(60386)) - self.getControl(100019).setLabel(self.result.get("episode", "N/A") + " de " + - self.result.get("episodes", "N/A")) + self.getControl(100019).setLabel(self.result.get("episode", "N/A") + " de " + self.result.get("episodes", "N/A")) self.getControl(100020).setLabel(config.get_localized_string(60387)) self.getControl(100021).setLabel(self.result.get("date", "N/A")) @@ -287,10 +285,10 @@ class InfoWindow(xbmcgui.WindowXMLDialog): logger.info("action=" + repr(action.getId())) action = action.getId() - # Obtenemos el foco + # Find Focus focus = self.getFocusId() - # Accion 1: Flecha izquierda + # Left if action == 1: if focus == ID_BUTTON_OK: @@ -298,26 +296,26 @@ class InfoWindow(xbmcgui.WindowXMLDialog): elif focus == ID_BUTTON_CANCEL: if self.indexList + 1 != len(self.listData): - # vamos al botón Siguiente + # Next self.setFocus(self.getControl(ID_BUTTON_NEXT)) elif self.indexList > 0: - # vamos al botón Anterior ya que Siguiente no está activo (estamos al final de la lista) + # Previous self.setFocus(self.getControl(ID_BUTTON_PREVIOUS)) elif focus == ID_BUTTON_NEXT: if self.indexList > 0: - # vamos al botón Anterior + # Next self.setFocus(self.getControl(ID_BUTTON_PREVIOUS)) - # Accion 2: Flecha derecha + # Right elif action == 2: if focus == ID_BUTTON_PREVIOUS: if self.indexList + 1 != len(self.listData): - # vamos al botón Siguiente + # Next self.setFocus(self.getControl(ID_BUTTON_NEXT)) else: - # vamos al botón Cancelar ya que Siguiente no está activo (estamos al final de la lista) + # Cancel self.setFocus(self.getControl(ID_BUTTON_CANCEL)) elif focus == ID_BUTTON_NEXT: @@ -326,6 +324,13 @@ class InfoWindow(xbmcgui.WindowXMLDialog): elif focus == ID_BUTTON_CANCEL: 
self.setFocus(self.getControl(ID_BUTTON_OK)) + # Up + elif action == 3: + self.setFocus(self.getControl(ID_BUTTON_CLOSE)) + + # Down + elif action == 4: + self.setFocus(self.getControl(ID_BUTTON_OK)) # Pulsa ESC o Atrás, simula click en boton cancelar if action in [10, 92]: self.onClick(ID_BUTTON_CANCEL) diff --git a/platformcode/xbmc_videolibrary.py b/platformcode/xbmc_videolibrary.py index baecce5c..86b2ed4a 100644 --- a/platformcode/xbmc_videolibrary.py +++ b/platformcode/xbmc_videolibrary.py @@ -13,6 +13,7 @@ import os import threading import time import re +import math import xbmc from core import filetools @@ -503,7 +504,7 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): #update_path = filetools.join(videolibrarypath, folder_content, folder) + "/" # Problemas de encode en "folder" update_path = filetools.join(videolibrarypath, folder_content, ' ').rstrip() - if not scrapertools.find_single_match(update_path, '(^\w+:\/\/)'): + if videolibrarypath.startswith("special:") or not scrapertools.find_single_match(update_path, '(^\w+:\/\/)'): payload["params"] = {"directory": update_path} while xbmc.getCondVisibility('Library.IsScanningVideo()'): @@ -511,8 +512,6 @@ def update(folder_content=config.get_setting("folder_tvshows"), folder=""): data = get_data(payload) - #xbmc.executebuiltin('XBMC.ReloadSkin()') - def search_library_path(): sql = 'SELECT strPath FROM path WHERE strPath LIKE "special://%/plugin.video.kod/library/" AND idParentPath ISNULL' @@ -896,127 +895,76 @@ def clean(path_list=[]): progress = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(80025)) progress.update(0) - for path in path_list: - - idParentPath = 0 - sql_path = '' - sql_movies_path = '' - sql_tvshows_path = '' - sql_episodes_path = '' - - path, sep = sql_format(path) - movies_folder = config.get_setting("folder_movies") - tvshows_folder = config.get_setting("folder_tvshows") - - # delete episode/movie 
(downloads.py move_to_libray) - if path.endswith(".strm"): - if movies_folder in path: - sql_movies_path = path - else: - sql_episodes_path = path - # delete movie - elif movies_folder in path: - if not path.endswith(sep): path += sep - - sql_movies_path = path + '%' - # delete tvshow - elif tvshows_folder in path: - if not path.endswith(sep): path += sep - - sql_tvshows_path = path + '%' - - sql_episodes_path = sql_tvshows_path - # delete video library - else: - if not path.endswith(sep): path += sep - - sql_path = path - - sql_movies_path = sql_path + movies_folder - if not sql_movies_path.endswith(sep): sql_movies_path += sep - sql_movies_path += '%' - - sql_tvshows_path = sql_path + tvshows_folder - if not sql_tvshows_path.endswith(sep): sql_tvshows_path += sep - sql_tvshows_path += '%' - - sql_episodes_path = sql_tvshows_path - - if sql_path: - # search video library path in the DB + # if the path list is empty, clean the entire video library + if not path_list: + if not config.get_setting("videolibrary_kodi"): + sql_path, sep = sql_format(config.get_setting("videolibrarypath")) + if not sql_path.endswith(sep): sql_path += sep sql = 'SELECT idPath FROM path where strPath LIKE "%s"' % sql_path nun_records, records = execute_sql_kodi(sql) - # delete video library path - if records: - idPath = records[0][0] - idParentPath = idPath - if not config.get_setting("videolibrary_kodi"): - sql = 'DELETE from path WHERE idPath=%s' % idPath - nun_records, records = execute_sql_kodi(sql) - - if sql_movies_path: - # search movies in the DB - sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % sql_movies_path + idPath = records[0][0] + sql = 'DELETE from path WHERE idPath=%s' % idPath nun_records, records = execute_sql_kodi(sql) - # delete movies - if records: - for record in records: - idMovie = record[0] - sql = 'DELETE from movie WHERE idMovie=%s' % idMovie - nun_records, records = execute_sql_kodi(sql) - - if sql_movies_path: - # search movies path and folders in the 
DB - sql = 'SELECT idPath, idParentPath FROM path where strPath LIKE "%s"' % sql_movies_path + sql = 'DELETE from path WHERE idParentPath=%s' % idPath nun_records, records = execute_sql_kodi(sql) - # delete movies path and folders - if records: - for record in records: - if record[1] == idParentPath and config.get_setting("videolibrary_kodi"): - continue - idPath = record[0] - sql = 'DELETE from path WHERE idPath=%s' % idPath - nun_records, records = execute_sql_kodi(sql) - if sql_tvshows_path: - # search TV shows in the DB - sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_tvshows_path - nun_records, records = execute_sql_kodi(sql) - # delete TV shows - if records: - for record in records: - idShow = record[0] - sql = 'DELETE from tvshow WHERE idShow=%s' % idShow - nun_records, records = execute_sql_kodi(sql) + from core import videolibrarytools + for path, folders, files in filetools.walk(videolibrarytools.MOVIES_PATH): + for folder in folders: + path_list.append(filetools.join(config.get_setting("videolibrarypath"), videolibrarytools.FOLDER_MOVIES, folder)) - if sql_episodes_path: - # search episodes in the DB - sql = 'SELECT idEpisode FROM episode where c18 LIKE "%s"' % sql_episodes_path - nun_records, records = execute_sql_kodi(sql) - # delete episodes - if records: - for record in records: - idEpisode = record[0] - sql = 'DELETE from episode WHERE idEpisode=%s' % idEpisode - nun_records, records = execute_sql_kodi(sql) + for path, folders, files in filetools.walk(videolibrarytools.TVSHOWS_PATH): + for folder in folders: + tvshow_nfo = filetools.join(path, folder, "tvshow.nfo") + if filetools.exists(tvshow_nfo): + path_list.append(filetools.join(config.get_setting("videolibrarypath"), videolibrarytools.FOLDER_TVSHOWS, folder)) - if sql_tvshows_path: - # search TV shows path and folders in the DB - sql = 'SELECT idPath, idParentPath FROM path where strPath LIKE "%s"' % sql_tvshows_path + if path_list: t = float(100) / len(path_list) + for i, 
path in enumerate(path_list): + progress.update(int(math.ceil((i + 1) * t))) + + if not path: + continue + + sql_path, sep = sql_format(path) + if filetools.isdir(path) and not sql_path.endswith(sep): sql_path += sep + + if filetools.isdir(path): + # search movie in the DB + sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % (sql_path + '%') nun_records, records = execute_sql_kodi(sql) - # delete tvshows path and folders + # delete movie if records: - for record in records: - if record[1] == idParentPath and config.get_setting("videolibrary_kodi"): - continue - idPath = record[0] - sql = 'DELETE from path WHERE idPath=%s' % idPath - nun_records, records = execute_sql_kodi(sql) + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveMovie", "id": 1, "params": {"movieid": records[0][0]}} + data = get_data(payload) + continue + # search TV show in the DB + sql = 'SELECT idShow FROM tvshow_view where strPath LIKE "%s"' % sql_path + nun_records, records = execute_sql_kodi(sql) + # delete TV show + if records: + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveTVShow", "id": 1, "params": {"tvshowid": records[0][0]}} + data = get_data(payload) + elif config.get_setting("folder_movies") in sql_path: + # search movie in the DB + sql = 'SELECT idMovie FROM movie where c22 LIKE "%s"' % sql_path + nun_records, records = execute_sql_kodi(sql) + # delete movie + if records: + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveMovie", "id": 1, "params": {"movieid": records[0][0]}} + data = get_data(payload) + else: + # search episode in the DB + sql = 'SELECT idEpisode FROM episode where c18 LIKE "%s"' % sql_path + nun_records, records = execute_sql_kodi(sql) + # delete episode + if records: + payload = {"jsonrpc": "2.0", "method": "VideoLibrary.RemoveEpisode", "id": 1, "params": {"episodeid": records[0][0]}} + data = get_data(payload) progress.update(100) xbmc.sleep(1000) progress.close() - xbmc.executebuiltin('XBMC.ReloadSkin()') def 
execute_sql_kodi(sql): @@ -1120,7 +1068,6 @@ def check_sources(new_movies_path='', new_tvshows_path=''): return False, False - def update_sources(new='', old=''): logger.info() if new == old: return @@ -1212,6 +1159,8 @@ def ask_set_content(silent=False): if set_content("movie", True, custom) and set_content("tvshow", True, custom): platformtools.dialog_ok(config.get_localized_string(80026), config.get_localized_string(70104)) config.set_setting("videolibrary_kodi", True) + from specials import videolibrary + videolibrary.update_videolibrary() update() else: platformtools.dialog_ok(config.get_localized_string(80026), config.get_localized_string(80024)) diff --git a/resources/language/English/strings.po b/resources/language/English/strings.po index e75aa764..e8eeecb9 100644 --- a/resources/language/English/strings.po +++ b/resources/language/English/strings.po @@ -23,12 +23,16 @@ msgctxt "#20001" msgid "eng" msgstr "" +msgctxt "#30000" +msgid "View" +msgstr "" + msgctxt "#30001" msgid "Check for updates" msgstr "" msgctxt "#30002" -msgid "Enable adult mode" +msgid "Enable touch view" msgstr "" msgctxt "#30003" @@ -55,6 +59,10 @@ msgctxt "#30008" msgid "High" msgstr "" +msgctxt "#30009" +msgid "Show only channels" +msgstr "" + msgctxt "#30010" msgid "Channel icons view" msgstr "" @@ -76,7 +84,7 @@ msgid "Password" msgstr "" msgctxt "#30017" -msgid "Download path*" +msgid "Download path" msgstr "" msgctxt "#30018" @@ -108,7 +116,7 @@ msgid "Contextual menu" msgstr "" msgctxt "#30025" -msgid "Show KoD settings" +msgid "" msgstr "" msgctxt "#30026" @@ -144,7 +152,7 @@ msgid "Internal Client" msgstr "" msgctxt "#30034" -msgid ": Select the video, or 'Cancel' for all" +msgid "Select the video, or 'Cancel' for all" msgstr "" msgctxt "#30035" @@ -171,6 +179,10 @@ msgctxt "#30040" msgid "Do you really want to delete the %s file?" 
msgstr "" +msgctxt "#30041" +msgid "Select a value" +msgstr "" + msgctxt "#30043" msgid "Force view mode" msgstr "" @@ -204,7 +216,7 @@ msgid "Unsopported Server" msgstr "" msgctxt "#30067" -msgid "Path*" +msgid "Path" msgstr "" msgctxt "#30068" @@ -280,7 +292,7 @@ msgid "Documentaries" msgstr "" msgctxt "#30126" -msgid "Adult" +msgid "" msgstr "" msgctxt "#30130" @@ -477,15 +489,15 @@ msgid "Academy Awards" msgstr "" msgctxt "#30998" -msgid "Shortcut" +msgid "Quick menu shortcut" msgstr "" msgctxt "#30999" -msgid "Assign key to open shortcut" +msgid "Assign key" msgstr "" msgctxt "#31000" -msgid "Remove key to open shortcut" +msgid "Unassign key" msgstr "" msgctxt "#50000" @@ -709,11 +721,11 @@ msgid "Mark TV show as watched" msgstr "" msgctxt "#60022" -msgid "Automatically find new episodes: Disable" +msgid "Remove from video library update" msgstr "" msgctxt "#60023" -msgid "Automatically find new episodes: Enable" +msgid "Add to video library update" msgstr "" msgctxt "#60024" @@ -725,7 +737,7 @@ msgid "Delete TV show" msgstr "" msgctxt "#60026" -msgid "Search for new episodes and update" +msgid "KoD settings" msgstr "" msgctxt "#60027" @@ -789,7 +801,7 @@ msgid "Delete TV show" msgstr "" msgctxt "#60042" -msgid "Delete links of %s" +msgid "Delete channel %s" msgstr "" msgctxt "#60043" @@ -797,7 +809,7 @@ msgid "Delete %s links of channel %s" msgstr "" msgctxt "#60044" -msgid "Do you want really to delete '%s' from video library?" +msgid "Do you want really to delete "%s" from video library?" msgstr "" msgctxt "#60045" @@ -877,7 +889,7 @@ msgid "Added episode to the video library..." msgstr "" msgctxt "#60066" -msgid "An error has occurred. It has not been possible to add the movie "%s" to the video library" +msgid "An error has occurred. The movie "%s" has not been added to the video library" msgstr "" msgctxt "#60067" @@ -889,7 +901,7 @@ msgid "An error has occurred. 
The TV show "%s" has not been added to the video l msgstr "" msgctxt "#60069" -msgid "An error has occurred. The TV show "%s" has not been added completely to the video library" +msgid "An error has occurred. The TV show "%s" has not been completely added to the video library" msgstr "" msgctxt "#60070" @@ -897,7 +909,7 @@ msgid "The TV show "%s" has been added to the video library" msgstr "" msgctxt "#60071" -msgid "Autoplay configuration" +msgid "AutoPlay configuration" msgstr "" msgctxt "#60072" @@ -1373,7 +1385,7 @@ msgid "Find %s possible matches" msgstr "" msgctxt "#60298" -msgid "[%s]: Select the correct TV show" +msgid "%s - Select the correct TV show" msgstr "" msgctxt "#60299" @@ -1401,7 +1413,7 @@ msgid "The unzipped %s file already exists, or you want to overwrite it.?" msgstr "" msgctxt "#60305" -msgid "Adult channels" +msgid "" msgstr "" msgctxt "#60306" @@ -1413,7 +1425,7 @@ msgid "Use 'Preferences' to change your password" msgstr "" msgctxt "#60308" -msgid "Adult channels" +msgid "" msgstr "" msgctxt "#60309" @@ -1517,7 +1529,7 @@ msgid "Configuration" msgstr "" msgctxt "#60334" -msgid "Password for adult channels" +msgid "" msgstr "" msgctxt "#60335" @@ -1557,7 +1569,7 @@ msgid "Information" msgstr "" msgctxt "#60349" -msgid "Go to the main menu" +msgid "Previous menu" msgstr "" msgctxt "#60350" @@ -1565,7 +1577,7 @@ msgid "Search in other channels" msgstr "Search in other channels" msgctxt "#60351" -msgid "Set as Homepage" +msgid "Set as main menu" msgstr "" msgctxt "#60352" @@ -1681,7 +1693,7 @@ msgid "Summary:" msgstr "" msgctxt "#60389" -msgid "Updating %s [%s]..." +msgid "Updating "%s" [%s]..." 
msgstr "" msgctxt "#60390" @@ -2293,7 +2305,7 @@ msgid "Download settings" msgstr "" msgctxt "#60542" -msgid "Video library settings" +msgid "Configure video library" msgstr "" msgctxt "#60544" @@ -2385,7 +2397,7 @@ msgid "Restore video library (strm, nfo and json)" msgstr "" msgctxt "#60568" -msgid "Search for new episodes and update video library" +msgid "Update video library" msgstr "" msgctxt "#60569" @@ -2489,7 +2501,7 @@ msgid " Server #%s" msgstr "" msgctxt "#60598" -msgid "Configuration of video library" +msgid "Video library configuration" msgstr "" msgctxt "#60600" @@ -2509,15 +2521,15 @@ msgid "When Kodi starts" msgstr "" msgctxt "#60604" -msgid "Once a day" +msgid "Daily" msgstr "" msgctxt "#60605" -msgid "At the start of Kodi and once a day" +msgid "When Kodi starts and daily" msgstr "" msgctxt "#60606" -msgid " Wait before updating at startup of Kodi" +msgid " Update waiting time" msgstr "" msgctxt "#60607" @@ -2541,11 +2553,11 @@ msgid "60 sec" msgstr "" msgctxt "#60613" -msgid " Begin scheduled update from" +msgid " Update time" msgstr "" msgctxt "#60614" -msgid " Search for new episodes in active TV shows" +msgid " Search for new episodes" msgstr "" msgctxt "#60615" @@ -2557,15 +2569,15 @@ msgid "Always" msgstr "" msgctxt "#60617" -msgid "According to new episodes" +msgid "Based on airing" msgstr "" msgctxt "#60618" -msgid " Search for content in" +msgid " Kodi video library update" msgstr "" msgctxt "#60619" -msgid "The folder of each TV show" +msgid "Each TV show" msgstr "" msgctxt "#60620" @@ -2573,7 +2585,7 @@ msgid "All video library" msgstr "" msgctxt "#60621" -msgid "Show links in" +msgid "Links view" msgstr "" msgctxt "#60622" @@ -2629,7 +2641,7 @@ msgid "0 seg" msgstr "" msgctxt "#60637" -msgid "Synchronizing with Trakt" +msgid " Synchronizing with Trakt" msgstr "" msgctxt "#60638" @@ -2673,7 +2685,7 @@ msgid "Never" msgstr "" msgctxt "#60650" -msgid "Video library information provider" +msgid " Video library information providers" msgstr 
"" msgctxt "#60651" @@ -3117,15 +3129,15 @@ msgid "Most Viewed" msgstr "" msgctxt "#70078" -msgid "Show only links of " +msgid "Show links of " msgstr "" msgctxt "#70079" -msgid "Delete ony the links of " +msgid "Delete channel " msgstr "" msgctxt "#70081" -msgid "The folder also contains local or downloaded video files. Do you want to delete them?" +msgid "The folder "%s" contains other files. Delete it anyway?" msgstr "" msgctxt "#70082" @@ -3145,7 +3157,7 @@ msgid "Delete TV show" msgstr "" msgctxt "#70086" -msgid "Delete only the links of %s" +msgid "Delete channel %s" msgstr "" msgctxt "#70087" @@ -3153,7 +3165,7 @@ msgid "Deleted %s links from canal %s" msgstr "" msgctxt "#70088" -msgid "Are you sure you want to delete '%s' from video library?" +msgid "Are you sure you want to delete "%s" from video library?" msgstr "" msgctxt "#70089" @@ -3257,7 +3269,7 @@ msgid "Only until Kodi restarts" msgstr "" msgctxt "#70115" -msgid "Request password to open adult channels" +msgid "" msgstr "" msgctxt "#70116" @@ -3269,15 +3281,15 @@ msgid "Confirm new password" msgstr "" msgctxt "#70118" -msgid "TV shows folder*" +msgid "TV shows folder" msgstr "" msgctxt "#70119" -msgid "Movies folder*" +msgid "Movies folder" msgstr "" msgctxt "#70120" -msgid "Add KoD contents to Kodi video library*" +msgid "Add KoD contents to Kodi video library" msgstr "" msgctxt "#70121" @@ -3305,8 +3317,8 @@ msgid "Customization" msgstr "" msgctxt "#70128" -msgid "Infoplus Animation" -msgstr "Animazione Infoplus" +msgid "InfoPlus animation" +msgstr "Animazione InfoPlus" msgctxt "#70129" msgid "Without animation" @@ -3397,7 +3409,7 @@ msgid "Contextual menu" msgstr "" msgctxt "#70151" -msgid "Show Infoplus" +msgid "Show InfoPlus" msgstr "" msgctxt "#70152" @@ -3405,7 +3417,7 @@ msgid "Show ExtendedInfo" msgstr "" msgctxt "#70153" -msgid "Shortcut" +msgid "" msgstr "" msgctxt "#70154" @@ -3853,11 +3865,15 @@ msgid "The data entered is not correct!" 
msgstr "" msgctxt "#70266" -msgid "The search for% s did not match." +msgid "The search for %s did not match" +msgstr "" + +msgctxt "#70268" +msgid "The TV show "%s" is ended or has been canceled. Would you like to remove it from the video library update?" msgstr "" msgctxt "#70269" -msgid "Search new episodes now" +msgid "Search new episodes" msgstr "" msgctxt "#70270" @@ -3929,7 +3945,7 @@ msgid "Configure search" msgstr "" msgctxt "#70287" -msgid "Configure video library" +msgid "Video library settings" msgstr "" msgctxt "#70288" @@ -5656,7 +5672,7 @@ msgid "Dear user, you seem to have problems with ADSL! We suggest you call your msgstr "" msgctxt "#70722" -msgid "Dear User, your current DNS do not allow you to reach all sites, ergo, not all Channels will work, we recommend you, to take advantage of more, to set up the DNS.\nSearch your favorite search engine for\n 1. guides on how to choose a free DNS.\n 2. set it up on your device.\nWe recommend the cloudflare DNS 1.1.1.1 and 1.0.0.1\nAlternatively the Google DNS 8.8.8.8 and 8.8.4.4.\nIf you have a VODAFONE station, enter the modem panel and disable 'Secure DNS' before proceeding.\n" +msgid "Dear User, you have disabled the DNS override and your current DNS do not allow you to reach all sites, ergo, not all Channels will work, we recommend you, to take advantage of more, to set up the DNS or re-enable the override in settings.\nSearch your favorite search engine for\n 1. guides on how to choose a free DNS.\n 2. set it up on your device.\nWe recommend the cloudflare DNS 1.1.1.1 and 1.0.0.1\nAlternatively the Google DNS 8.8.8.8 and 8.8.4.4.\nIf you have a VODAFONE station, enter the modem panel and disable 'Secure DNS' before proceeding.\n" msgstr "" msgctxt "#70723" @@ -5905,7 +5921,7 @@ msgstr "" msgctxt "#70784" msgid "Attention!" -msgstr "Attenzione!" 
+msgstr "" msgctxt "#70785" msgid "Install Elementum" @@ -5924,7 +5940,7 @@ msgid "Logging" msgstr "" msgctxt "#70789" -msgid "* Change by opening the settings from KoD main menu" +msgid "" msgstr "" msgctxt "#70790" @@ -5987,6 +6003,14 @@ msgctxt "#70804" msgid "Next extraction..." msgstr "" +msgctxt "#70805" +msgid "Stay in seed" +msgstr "" + +msgctxt "#70806" +msgid "Changing this parameter permanently overwrites the Elementum settings.\nDo you want to continue?" +msgstr "" + # DNS start [ settings and declaration ] msgctxt "#707401" msgid "Enable DNS check alert" @@ -6213,7 +6237,7 @@ msgid "Folder name for TV shows" msgstr "" msgctxt "#80022" -msgid "You can configure the Kodi video library later from the settings menu inside KoD" +msgid "You can configure the Kodi video library later from KoD settings" msgstr "" msgctxt "#80023" @@ -6221,7 +6245,7 @@ msgid "You will be asked to choose and configure the information providers for m msgstr "" msgctxt "#80024" -msgid "An error has occurred during the configuration of the Kodi video library. Please check the log and try again from the settings menu inside KoD" +msgid "An error has occurred during the configuration of the Kodi video library. Please check the log and try again from KoD settings" msgstr "" msgctxt "#80025" @@ -6241,7 +6265,7 @@ msgid "The selected folders are already used by the Kodi library. Please change msgstr "" msgctxt "#80029" -msgid "The selected folders are already used by the Kodi library. Please change them properly from the settings menu inside KoD" +msgid "The selected folders are already used by the Kodi library. Please change them properly from KoD settings" msgstr "" msgctxt "#80030" @@ -6253,7 +6277,7 @@ msgid "Unable to connect to github. This is probably due to a lack of connection msgstr "" msgctxt "#80032" -msgid "Installation in progress..." +msgid "Installing..." 
msgstr "" msgctxt "#80033" @@ -6290,4 +6314,40 @@ msgstr "" msgctxt "#80041" msgid "Latest updates:\n" +msgstr "" + +msgctxt "#80042" +msgid "Include local episodes when adding a TV shows" +msgstr "" + +msgctxt "#80043" +msgid "Attention, in order to watch local episodes you have to configure the Kodi video library from KoD settings" +msgstr "" + +msgctxt "#80044" +msgid "Do you want to include local episodes for the TV show "%s"?" +msgstr "" + +msgctxt "#80045" +msgid "The folder of the local episodes has to be different from the TV show one in the KoD video library" +msgstr "" + +msgctxt "#80046" +msgid "Select the folder containing the local episodes" +msgstr "" + +msgctxt "#80047" +msgid "The TV show "% s" includes local episodes. They will be only removed from the Kodi video library without being deleted" +msgstr "" + +msgctxt "#80048" +msgid "Add local episodes" +msgstr "" + +msgctxt "#80049" +msgid "Remove local episodes" +msgstr "" + +msgctxt "#80050" +msgid "Downloading..." msgstr "" \ No newline at end of file diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index 90e15aed..4d226744 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -23,13 +23,17 @@ msgctxt "#20001" msgid "eng" msgstr "ita" +msgctxt "#30000" +msgid "View" +msgstr "Vista" + msgctxt "#30001" msgid "Check for updates" msgstr "Verifica aggiornamenti" msgctxt "#30002" -msgid "Enable adult mode" -msgstr "Abilita modalità adulti" +msgid "Enable touch view" +msgstr "Abilita vista touch" msgctxt "#30003" msgid "Enable debug logging" @@ -55,6 +59,10 @@ msgctxt "#30008" msgid "High" msgstr "Alta" +msgctxt "#30009" +msgid "Show only channels" +msgstr "Mostra solo canali" + msgctxt "#30010" msgid "Channel icons view" msgstr "Visualizzazione icone dei canali" @@ -76,8 +84,8 @@ msgid "Password" msgstr "Password" msgctxt "#30017" -msgid "Download path*" -msgstr "Percorso download*" +msgid "Download path" +msgstr 
"Percorso download" msgctxt "#30018" msgid "Download list path" @@ -108,8 +116,8 @@ msgid "Contextual menu" msgstr "Menu contestuale" msgctxt "#30025" -msgid "Show KoD settings" -msgstr "Mostra impostazioni KoD" +msgid "" +msgstr "" msgctxt "#30026" msgid "Direct" @@ -144,8 +152,8 @@ msgid "Internal Client" msgstr "Client Interno" msgctxt "#30034" -msgid ": Select the video, or 'Cancel' for all" -msgstr ": Seleziona il video o 'Annulla' per tutti" +msgid "Select the video, or 'Cancel' for all" +msgstr "Seleziona il video o 'Annulla' per tutti" msgctxt "#30035" msgid "ERROR in the Client" @@ -171,6 +179,10 @@ msgctxt "#30040" msgid "Do you really want to delete the %s file?" msgstr "Vuoi veramente eliminare il file %s?" +msgctxt "#30041" +msgid "Select a value" +msgstr "Seleziona un valore" + msgctxt "#30043" msgid "Force view mode" msgstr "Forza modalità di visualizzazione" @@ -204,8 +216,8 @@ msgid "Unsopported Server" msgstr "Server non supportato" msgctxt "#30067" -msgid "Path*" -msgstr "Percorso*" +msgid "Path" +msgstr "Percorso" msgctxt "#30068" msgid "Filter by servers" @@ -280,8 +292,8 @@ msgid "Documentaries" msgstr "Documentari" msgctxt "#30126" -msgid "Adult" -msgstr "Adulti" +msgid "" +msgstr "" msgctxt "#30130" msgid "News" @@ -476,16 +488,16 @@ msgid "Academy Awards" msgstr "Premi Oscar" msgctxt "#30998" -msgid "Shortcut" -msgstr "Scorciatoia" +msgid "Quick menu shortcut" +msgstr "Scorciatoia menu rapido" msgctxt "#30999" -msgid "Assign key to open shortcut" -msgstr "Assegna tasto scorciatoia" +msgid "Assign key" +msgstr "Assegna tasto" msgctxt "#31000" -msgid "Remove key to open shortcut" -msgstr "Rimuovi tasto scorciatoia" +msgid "Unassign key" +msgstr "Disassegna tasto" msgctxt "#50000" msgid "Sagas" @@ -708,12 +720,12 @@ msgid "Mark TV show as watched" msgstr "Segna serie TV come vista" msgctxt "#60022" -msgid "Automatically find new episodes: Disable" -msgstr "Trova automaticamente nuovi episodi: Disattiva" +msgid "Remove from video library 
update" +msgstr "Rimuovi da aggiornamento videoteca" msgctxt "#60023" -msgid "Automatically find new episodes: Enable" -msgstr "Trova automaticamente nuovi episodi: Attiva" +msgid "Add to video library update" +msgstr "Aggiungi ad aggiornamento videoteca" msgctxt "#60024" msgid "Delete TV show/channel" @@ -724,8 +736,8 @@ msgid "Delete TV show" msgstr "Elimina serie TV" msgctxt "#60026" -msgid "Search for new episodes and update" -msgstr "Cerca nuovi episodi e aggiorna" +msgid "KoD settings" +msgstr "Impostazioni KoD" msgctxt "#60027" msgid "Season %s" @@ -788,16 +800,16 @@ msgid "Delete TV show" msgstr "Elimina serie TV" msgctxt "#60042" -msgid "Delete links of %s" -msgstr "Elimina link di %s" +msgid "Delete channel %s" +msgstr "Elimina canale %s" msgctxt "#60043" msgid "Delete %s links of channel %s" msgstr "Cancellati %s collegamenti del canale %s" msgctxt "#60044" -msgid "Do you want really to delete '%s' from video library?" -msgstr "Vuoi davvero rimuovere '%s' dalla videoteca?" +msgid "Do you want really to delete "%s" from video library?" +msgstr "Vuoi davvero rimuovere "%s" dalla videoteca?" msgctxt "#60045" msgid "Sync with Trakt started" @@ -876,8 +888,8 @@ msgid "Added episode to the video library..." msgstr "Aggiunta episodio alla videoteca..." msgctxt "#60066" -msgid "An error has occurred. It has not been possible to add the movie "%s" to the video library" -msgstr "Si è verificato un errore. Non è stato possibile aggiungere il film "%s" alla videoteca" +msgid "An error has occurred. The movie "%s" has not been added to the video library" +msgstr "Si è verificato un errore. Il film "%s" non è stato aggiunto alla videoteca" msgctxt "#60067" msgid "An error has occurred. The TV show "%s" has not been added to the video library. It has not been possible to add any episode" @@ -888,7 +900,7 @@ msgid "An error has occurred. The TV show "%s" has not been added to the video l msgstr "Si è verificato un errore. 
La serie TV "%s" non è stata aggiunta alla videoteca" msgctxt "#60069" -msgid "An error has occurred. The TV show "%s" has not been added completely to the video library" +msgid "An error has occurred. The TV show "%s" has not been completely added to the video library" msgstr "Si è verificato un errore. La serie TV "%s" non è stata aggiunta completamente alla videoteca" msgctxt "#60070" @@ -896,8 +908,8 @@ msgid "The TV show "%s" has been added to the video library" msgstr "La serie TV "%s" è stata aggiunta alla videoteca" msgctxt "#60071" -msgid "Autoplay configuration" -msgstr "Configurazione Autoplay" +msgid "AutoPlay configuration" +msgstr "Configurazione AutoPlay" msgctxt "#60072" msgid "It seems that links of %s are not working." @@ -1372,8 +1384,8 @@ msgid "Find %s possible matches" msgstr "Trovate %s possibili corrispondenze" msgctxt "#60298" -msgid "[%s]: Select the correct TV show" -msgstr "[%s]: Seleziona la serie TV corretta" +msgid "%s - Select the correct TV show" +msgstr "%s - Seleziona la serie TV corretta" msgctxt "#60299" msgid "Not found in the language '%s'" @@ -1400,8 +1412,8 @@ msgid "The unzipped %s file already exists, or you want to overwrite it.?" msgstr "il file %s da decomprimere esiste già, vuoi sovrascriverlo?" msgctxt "#60305" -msgid "Adult channels" -msgstr "Canali per adulti" +msgid "" +msgstr "" msgctxt "#60306" msgid "The fields 'New password' and 'Confirm new password' do not match" @@ -1412,8 +1424,8 @@ msgid "Use 'Preferences' to change your password" msgstr "Entra in 'Preferenze' per cambiare la password" msgctxt "#60308" -msgid "Adult channels" -msgstr "Canali para adulti" +msgid "" +msgstr "" msgctxt "#60309" msgid "The password is not correct." @@ -1516,8 +1528,8 @@ msgid "Configuration" msgstr "Configurazione" msgctxt "#60334" -msgid "Password for adult channels" -msgstr "Password per i canali per adulti" +msgid "" +msgstr "" msgctxt "#60335" msgid "Watch in..." 
@@ -1556,16 +1568,16 @@ msgid "Information" msgstr "Informazione" msgctxt "#60349" -msgid "Go to the main menu" -msgstr "Andare al menu principale" +msgid "Previous menu" +msgstr "Menu precedente" msgctxt "#60350" msgid "Search in other channels" msgstr "Cerca negli altri canali" msgctxt "#60351" -msgid "Set as Homepage" -msgstr "Impostare come Homepage" +msgid "Set as main menu" +msgstr "Imposta come menu principale" msgctxt "#60352" msgid "Add TV show to video library" @@ -1680,8 +1692,8 @@ msgid "Summary:" msgstr "Riassunto:" msgctxt "#60389" -msgid "Updating %s [%s]..." -msgstr "Aggiornamento %s [%s]..." +msgid "Updating "%s" [%s]..." +msgstr "Aggiornamento "%s" [%s]..." msgctxt "#60390" msgid "AutoPlay configuration" @@ -2292,8 +2304,8 @@ msgid "Download settings" msgstr "Impostazioni download" msgctxt "#60542" -msgid "Video library settings" -msgstr "Impostazioni videoteca" +msgid "Configure video library" +msgstr "Configura videoteca" msgctxt "#60544" msgid "More Options" @@ -2384,8 +2396,8 @@ msgid "Restore video library (strm, nfo and json)" msgstr "Ripristina videoteca (strm, nfo e json)" msgctxt "#60568" -msgid "Search for new episodes and update video library" -msgstr "Cerca nuovi episodi ed aggiorna videoteca" +msgid "Update video library" +msgstr "Aggiorna videoteca" msgctxt "#60569" msgid " - There are no default settings" @@ -2420,7 +2432,7 @@ msgid "A saving error occurred" msgstr "Si è verificato un errore al salvataggio" msgctxt "#60581" -msgid "Restoring the video library" +msgid "Restoring video library" msgstr "Ripristino videoteca" msgctxt "#60582" @@ -2488,8 +2500,8 @@ msgid " Server #%s" msgstr " Server #%s" msgctxt "#60598" -msgid "Configuration of video library" -msgstr "Configurazione della videoteca" +msgid "Video library configuration" +msgstr "Configurazione videoteca" msgctxt "#60600" msgid "TV shows" @@ -2508,16 +2520,16 @@ msgid "When Kodi starts" msgstr "All'avvio di Kodi" msgctxt "#60604" -msgid "Once a day" -msgstr "Una volta al 
giorno" +msgid "Daily" +msgstr "Giornaliero" msgctxt "#60605" -msgid "At the start of Kodi and once a day" -msgstr "All'avvio di Kodi e una volta al giorno" +msgid "When Kodi starts and daily" +msgstr "All'avvio di Kodi e giornaliero" msgctxt "#60606" -msgid " Wait before updating at startup of Kodi" -msgstr " Attendere prima di aggiornare all'avvio di Kodi" +msgid " Update waiting time" +msgstr " Tempo di attesa aggiornamento" msgctxt "#60607" msgid "When Kodi starts" @@ -2540,12 +2552,12 @@ msgid "60 sec" msgstr "60 sec" msgctxt "#60613" -msgid " Begin scheduled update from" -msgstr " Inizia aggiornamento programmato a partire dalle" +msgid " Update time" +msgstr " Ora aggiornamento" msgctxt "#60614" -msgid " Search for new episodes in active TV shows" -msgstr " Cerca nuovi episodi nelle serie TV attive" +msgid " Search for new episodes" +msgstr " Cerca nuovi episodi" msgctxt "#60615" msgid "Never" @@ -2556,24 +2568,24 @@ msgid "Always" msgstr "Sempre" msgctxt "#60617" -msgid "According to new episodes" -msgstr "Secondo le uscite" +msgid "Based on airing" +msgstr "In base all'uscita" msgctxt "#60618" -msgid " Search for content in" -msgstr " Esegui ricerca dei contenuti in" +msgid " Kodi video library update" +msgstr " Aggiornamento libreria di Kodi" msgctxt "#60619" -msgid "The folder of each TV show" -msgstr "La cartella di ogni serie TV" +msgid "Each TV show" +msgstr "Per serie TV" msgctxt "#60620" msgid "All video library" msgstr "Tutta la videoteca" msgctxt "#60621" -msgid "Show links in" -msgstr "Mostra collegamenti in" +msgid "Links view" +msgstr "Visualizzazione collegamenti" msgctxt "#60622" msgid "Normal window" @@ -2628,8 +2640,8 @@ msgid "0 seg" msgstr "0 sec" msgctxt "#60637" -msgid "Synchronizing with Trakt" -msgstr "Sincronizzazione con Trakt" +msgid " Synchronizing with Trakt" +msgstr " Sincronizzazione con Trakt" msgctxt "#60638" msgid " After mark as watched the episode" @@ -2672,8 +2684,8 @@ msgid "Never" msgstr "Mai" msgctxt "#60650" -msgid 
"Video library information provider" -msgstr "Provider informazioni videoteca" +msgid " Video library information providers" +msgstr " Provider informazioni videoteca" msgctxt "#60651" msgid " Movies" @@ -2817,11 +2829,11 @@ msgstr "OK" msgctxt "#70002" msgid "Cancel" -msgstr "Annullare" +msgstr "Annulla" msgctxt "#70003" msgid "Default" -msgstr "Default" +msgstr "Predefinito" msgctxt "#70004" msgid "Loading..." @@ -3116,16 +3128,16 @@ msgid "Most Viewed" msgstr "Più Viste" msgctxt "#70078" -msgid "Show only links of " -msgstr "Mostra solo link di " +msgid "Show links of " +msgstr "Mostra link di " msgctxt "#70079" -msgid "Delete only the links of " -msgstr "Elimina solo i link di " +msgid "Delete channel " +msgstr "Elimina canale " msgctxt "#70081" -msgid "The folder also contains local or downloaded video files. Do you want to delete them?" -msgstr "La cartella contiene anche file video locali o scaricati. Vuoi eliminarli?" +msgid "The folder "%s" contains other files. Delete it anyway?" +msgstr "La cartella "%s" contiene altri file. Eliminarla comunque?" msgctxt "#70082" msgid "Global Search" @@ -3144,16 +3156,16 @@ msgid "Delete TV show" msgstr "Elimina serie TV" msgctxt "#70086" -msgid "Delete only the links of %s" -msgstr "Elimina solo i link di %s" +msgid "Delete channel %s" +msgstr "Elimina canale %s" msgctxt "#70087" msgid "Deleted %s links from canal %s" msgstr "Eliminati %s link del canale %s" msgctxt "#70088" -msgid "Are you sure you want to delete '%s' from video library?" -msgstr "Vuoi davvero eliminare '%s' dalla videoteca?" +msgid "Are you sure you want to delete "%s" from video library?" +msgstr "Vuoi davvero eliminare "%s" dalla videoteca?" 
msgctxt "#70089" msgid "Show only links of %s" @@ -3256,8 +3268,8 @@ msgid "Only until Kodi restarts" msgstr "Fino al riavvio di Kodi" msgctxt "#70115" -msgid "Request password to open adult channels" -msgstr "Richiedi password per aprire canali per adulti" +msgid "" +msgstr "" msgctxt "#70116" msgid "New password" @@ -3268,16 +3280,16 @@ msgid "Confirm new password" msgstr "Conferma nuova password" msgctxt "#70118" -msgid "TV shows folder*" -msgstr "Cartella serie TV*" +msgid "TV shows folder" +msgstr "Cartella serie TV" msgctxt "#70119" -msgid "Movies folder*" -msgstr "Cartella film*" +msgid "Movies folder" +msgstr "Cartella film" msgctxt "#70120" -msgid "Add KoD contents to Kodi video library*" -msgstr "Aggiungi la videoteca di KoD alla libreria di Kodi*" +msgid "Add KoD contents to Kodi video library" +msgstr "Aggiungi la videoteca di KoD alla libreria di Kodi" msgctxt "#70121" msgid "Activate" @@ -3304,8 +3316,8 @@ msgid "Customization" msgstr "Personalizzazione" msgctxt "#70128" -msgid "Infoplus Animation" -msgstr "Animazione Infoplus" +msgid "InfoPlus animation" +msgstr "Animazione InfoPlus" msgctxt "#70129" msgid "Without animation" @@ -3396,16 +3408,16 @@ msgid "Contextual menu" msgstr "Menu contestuale" msgctxt "#70151" -msgid "Show Infoplus" -msgstr "Mostra Infoplus" +msgid "Show InfoPlus" +msgstr "Mostra InfoPlus" msgctxt "#70152" msgid "Show ExtendedInfo" msgstr "Mostra ExtendedInfo" msgctxt "#70153" -msgid "Shortcut" -msgstr "Scorciatoia" +msgid "" +msgstr "" msgctxt "#70154" msgid "TMDB search" @@ -3852,12 +3864,16 @@ msgid "The data entered is not correct!" msgstr "I dati introdotti non sono corretti!" msgctxt "#70266" -msgid "The search for% s did not match." -msgstr "La ricerca di %s non ha dato risultati." +msgid "The search for %s did not match" +msgstr "La ricerca di %s non ha dato risultati" + +msgctxt "#70268" +msgid "The TV show "%s" is ended or has been canceled. Would you like to remove it from the video library update?" 
+msgstr "La serie TV "%s" è terminata o è stata cancellata. Vuoi rimuoverla dall'aggiornamento della videoteca?" msgctxt "#70269" -msgid "Search new episodes now" -msgstr "Cerca nuovi episodi adesso" +msgid "Search new episodes" +msgstr "Cerca nuovi episodi" msgctxt "#70270" msgid "Movies video library" @@ -3928,8 +3944,8 @@ msgid "Configure search" msgstr "Configura ricerca" msgctxt "#70287" -msgid "Configure video library" -msgstr "Configura videoteca" +msgid "Video library settings" +msgstr "Impostazioni videoteca" msgctxt "#70288" msgid "Configure downloads" @@ -5656,8 +5672,8 @@ msgid "Dear user, you seem to have problems with ADSL! We suggest you call your msgstr "Gentile Utente, sembra tu abbia problemi con l'ADSL! Controlla quanto meno che il modem/router sia acceso e/o il tuo dispositivo connesso.\nKoD NON TROVERA NIENTE finchè non risolverai il problema." msgctxt "#70722" -msgid "Dear User, your current DNS do not allow you to reach all sites, ergo, not all Channels will work, we recommend you, to take advantage of more, to set up the DNS.\nSearch your favorite search engine for\n 1. guides on how to choose a free DNS.\n 2. set it up on your device.\nWe recommend the cloudflare DNS 1.1.1.1 and 1.0.0.1\nAlternatively the Google DNS 8.8.8.8 and 8.8.4.4.\nIf you have a VODAFONE station, enter the modem panel and disable 'Secure DNS' before proceeding.\n" -msgstr "Gentile Utente, i tuoi DNS attuali non ti permettono di raggiungere tutti i siti, ergo, non tutti i Canali funzioneranno, ti consigliamo, per usufruirne di un maggior numero, di impostare i DNS.\nCerca sul tuo motore di ricerca preferito guide su come\n1. scegliere un DNS libero.\n2. impostarlo sul tuo dispositivo.\nNoi ti consigliamo i DNS di cloudflare 1.1.1.1 e 1.0.0.1\nIn alternativa i DNS di Google 8.8.8.8 e 8.8.4.4.\nSe hai una VODAFONE station, entra nel pannello del modem e disattiva la voce 'DNS sicuro' prima di procedere. 
E Ricordati 'Applica' in fondo alla pagina per salvare le impostazioni...\n" +msgid "Dear User, you have disabled the DNS override and your current DNS do not allow you to reach all sites, ergo, not all Channels will work, we recommend you, to take advantage of more, to set up the DNS or re-enable the override in settings.\nSearch your favorite search engine for\n 1. guides on how to choose a free DNS.\n 2. set it up on your device.\nWe recommend the cloudflare DNS 1.1.1.1 and 1.0.0.1\nAlternatively the Google DNS 8.8.8.8 and 8.8.4.4.\nIf you have a VODAFONE station, enter the modem panel and disable 'Secure DNS' before proceeding.\n" +msgstr "Gentile Utente, hai disattivato l'override DNS ed i tuoi DNS attuali non ti permettono di raggiungere tutti i siti, ergo, non tutti i Canali funzioneranno, ti consigliamo, per usufruirne di un maggior numero, di impostare i DNS oppure di riattivare l'override dalle impostazioni.\nCerca sul tuo motore di ricerca preferito guide su come\n1. scegliere un DNS libero.\n2. impostarlo sul tuo dispositivo.\nNoi ti consigliamo i DNS di cloudflare 1.1.1.1 e 1.0.0.1\nIn alternativa i DNS di Google 8.8.8.8 e 8.8.4.4.\nSe hai una VODAFONE station, entra nel pannello del modem e disattiva la voce 'DNS sicuro' prima di procedere. E Ricordati 'Applica' in fondo alla pagina per salvare le impostazioni...\n" msgctxt "#70723" msgid "links in the folder" @@ -5924,8 +5940,8 @@ msgid "Logging" msgstr "Logging" msgctxt "#70789" -msgid "* Change by opening the settings from KoD main menu" -msgstr "* Cambia aprendo le impostazioni dal menu principale di KoD" +msgid "" +msgstr "" msgctxt "#70790" msgid "RAR download in progress" @@ -5987,6 +6003,14 @@ msgctxt "#70804" msgid "Next extraction..." msgstr "Estrazione successiva..." +msgctxt "#70805" +msgid "Stay in seed" +msgstr "Resta in seed" + +msgctxt "#70806" +msgid "Changing this parameter permanently overwrites the Elementum settings.\nDo you want to continue?" 
+msgstr "Modificando questo parametro vengono sovrascritte permanentemente le impostazioni di Elementum.\nVuoi continuare?" + # DNS start [ settings and declaration ] msgctxt "#707401" msgid "Enable DNS check alert" @@ -6213,16 +6237,16 @@ msgid "Folder name for TV shows" msgstr "Nome della cartella per le serie TV" msgctxt "#80022" -msgid "You can configure the Kodi video library later from the settings menu inside KoD" -msgstr "Potrai configurare la libreria di Kodi in seguito dal menu impostazioni all'interno di KoD" +msgid "You can configure the Kodi video library later from KoD settings" +msgstr "Potrai configurare la libreria di Kodi in seguito dalle impostazioni di KoD" msgctxt "#80023" msgid "You will be asked to choose and configure the information providers for movies and TV shows" msgstr "Ti verrà chiesto di scegliere e configurare i provider delle informazioni per film e serie TV" msgctxt "#80024" -msgid "An error has occurred during the configuration of the Kodi video library. Please check the log and try again from the settings menu inside KoD" -msgstr "Si è verificato un errore durante la configurazione della libreria di Kodi. Si prega di controllare il log e riprovare dal menu impostazioni all'interno di KoD" +msgid "An error has occurred during the configuration of the Kodi video library. Please check the log and try again from KoD settings" +msgstr "Si è verificato un errore durante la configurazione della libreria di Kodi. Si prega di controllare il log e riprovare dalle impostazioni di KoD" msgctxt "#80025" msgid "Cleaning database..." @@ -6241,8 +6265,8 @@ msgid "The selected folders are already used by the Kodi library. Please change msgstr "Le cartelle selezionate sono già utilizzate dalla libreria di Kodi. Si prega di cambiarle opportunamente" msgctxt "#80029" -msgid "The selected folders are already used by the Kodi library. 
Please change them properly from the settings menu inside KoD" -msgstr "Le cartelle selezionate sono già utilizzate dalla libreria di Kodi. Si prega di cambiarle opportunamente dal menu impostazioni all'interno di KoD" +msgid "The selected folders are already used by the Kodi library. Please change them properly from KoD settings" +msgstr "Le cartelle selezionate sono già utilizzate dalla libreria di Kodi. Si prega di cambiarle opportunamente dalle impostazioni di KoD" msgctxt "#80030" msgid "The default path and folders will be used. You will be asked to choose and configure the information providers for movies and TV shows" @@ -6253,8 +6277,8 @@ msgid "Unable to connect to github. This is probably due to a lack of connection msgstr "Impossibile connettersi a github. Questo è probabilmente dovuto ad una mancanza di connessione oppure github è attualmente offline. Per favore verifica e riprova" msgctxt "#80032" -msgid "Installation in progress..." -msgstr "Installazione in corso..." +msgid "Installing..." +msgstr "Installazione..." msgctxt "#80033" msgid "Choose the version of KoD to install" @@ -6290,4 +6314,40 @@ msgstr "Add-on aggiornato al commit %s" msgctxt "#80041" msgid "Latest updates:\n" -msgstr "Ultimi aggiornamenti:\n" \ No newline at end of file +msgstr "Ultimi aggiornamenti:\n" + +msgctxt "#80042" +msgid "Include local episodes when adding a TV show" +msgstr "Includere episodi in locale all'aggiunta di una serie TV" + +msgctxt "#80043" +msgid "Attention, in order to watch local episodes you have to configure the Kodi video library from KoD settings" +msgstr "Attenzione, per guardare gli episodi in locale devi configurare la libreria di Kodi dalle impostazioni di KoD" + +msgctxt "#80044" +msgid "Do you want to include local episodes for the TV show "%s"?" +msgstr "Vuoi includere degli episodi in locale per la serie TV "%s"?" 
+ +msgctxt "#80045" +msgid "The folder of the local episodes has to be different from the TV show one in the KoD video library" +msgstr "La cartella degli episodi in locale deve essere diversa da quella della serie TV nella videoteca di KoD" + +msgctxt "#80046" +msgid "Select the folder containing the local episodes" +msgstr "Seleziona la cartella contenente gli episodi in locale" + +msgctxt "#80047" +msgid "The TV show "%s" includes local episodes. They will be only removed from the Kodi video library without being deleted" +msgstr "La serie TV "%s" include degli episodi in locale. Verranno solo rimossi dalla libreria di Kodi senza essere cancellati" + +msgctxt "#80048" +msgid "Add local episodes" +msgstr "Aggiungi episodi in locale" + +msgctxt "#80049" +msgid "Remove local episodes" +msgstr "Rimuovi episodi in locale" + +msgctxt "#80050" +msgid "Downloading..." +msgstr "Download in corso..." \ No newline at end of file diff --git a/resources/settings.xml b/resources/settings.xml index 3ef50d2c..68732e9c 100644 --- a/resources/settings.xml +++ b/resources/settings.xml @@ -6,10 +6,11 @@ <setting id="autostart" type="action" label="70706" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAiYXV0b3N0YXJ0IiwNCiAgICAiY2hhbm5lbCI6ICJzZXR0aW5nIg0KfQ==)" default="Off"/> <setting label="70579" type="lsep"/> <setting id="addon_update_enabled" type="bool" label="70581" default="true"/> - <setting id="addon_update_message" type="bool" label="70582" default="true"/> - <setting id="addon_update_timer" type="slider" option="int" range="1,1,24" label="707416" default="1"/> + <setting id="addon_update_message" type="bool" label="70582" visible="eq(-1,true)" default="true"/> + <setting id="addon_update_timer" type="slider" option="int" range="1,1,24" label="707416" visible="eq(-2,true)" default="1"/> <setting label="70787" type="lsep"/> <setting id="resolver_dns" type="bool" label="707408" default="true" enable="true" visible="true"/> + <setting id="checkdns" 
type="bool" default="true" visible="false"/> <setting label="70788" type="lsep"/> <setting id="debug" type="bool" label="30003" default="false"/> </category> @@ -32,7 +33,6 @@ <setting id="folder_tvshows" type="text" label="70118" default="Serie TV"/> <setting id="folder_movies" type="text" label="70119" default="Film"/> <setting id="videolibrary_kodi" type="bool" label="70120" default="false"/> - <setting id="settings_kod" type="action" label="70789" action=""/> <setting label="59997" type="lsep"/> <setting id="videolibrary_max_quality" type="bool" label="70729" default="false" visible="true"/> <setting id="next_ep" type="select" label="70746" lvalues="70752|70747|70748" default="0"/> @@ -41,11 +41,11 @@ <setting id="trakt_sync" type="bool" label="70109" default="false"/> <setting label="30030" type="lsep"/> <setting id="vidolibrary_preferences" type="action" label="60542" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAiY2hhbm5lbF9jb25maWciLA0KICAgICJjaGFubmVsIjogInZpZGVvbGlicmFyeSINCn0==)"/> + <setting id="vidolibrary_update" type="action" label="60568" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAidXBkYXRlX3ZpZGVvbGlicmFyeSIsDQogICAgImNoYW5uZWwiOiAidmlkZW9saWJyYXJ5Ig0KfQ==)"/> <setting id="vidolibrary_export" type="action" label="80000" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAiZXhwb3J0X3ZpZGVvbGlicmFyeSIsDQogICAgImNoYW5uZWwiOiAiYmFja3VwIg0KfQ==)"/> <setting id="vidolibrary_import" type="action" label="80001" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAiaW1wb3J0X3ZpZGVvbGlicmFyeSIsDQogICAgImNoYW5uZWwiOiAiYmFja3VwIg0KfQ==)"/> <setting id="vidolibrary_delete" type="action" label="80036" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAiZGVsZXRlX3ZpZGVvbGlicmFyeSIsDQogICAgImNoYW5uZWwiOiAidmlkZW9saWJyYXJ5Ig0KfQ==)"/> <setting id="vidolibrary_restore" type="action" label="60567" 
action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAicmVzdG9yZV90b29scyIsDQogICAgImNoYW5uZWwiOiAic2V0dGluZyINCn0=)"/> - <setting id="vidolibrary_update" type="action" label="60568" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAidXBkYXRlX3ZpZGVvbGlicmFyeSIsDQogICAgImNoYW5uZWwiOiAidmlkZW9saWJyYXJ5Ig0KfQ==)"/> </category> <!-- Channels --> @@ -68,8 +68,9 @@ <setting id="servers_config" type="action" label="60538" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAic2VydmVyc19tZW51IiwNCiAgICAiY2hhbm5lbCI6ICJzaG9ydGN1dHMiDQp9==)"/> <setting id="debriders_config" type="action" label="60552" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAic2VydmVyc19tZW51IiwNCiAgICAiY2hhbm5lbCI6ICJzaG9ydGN1dHMiLA0KCSJ0eXBlIjogImRlYnJpZGVycyINCn0==)"/> <setting label="70578" type="lsep"/> - <setting id="torrent_config" type="action" label="70253" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJzZXR0aW5nX3RvcnJlbnQiLA0KICAgICJjaGFubmVsIjoic2V0dGluZyINCn0=)"/> - <setting id="quasar_install" type="action" label="70785" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJkb3dubG9hZCIsDQogICAgImNoYW5uZWwiOiJxdWFzYXJfZG93bmxvYWQiDQp9)"/> + <!-- <setting id="torrent_config" type="action" label="70253" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJzZXR0aW5nX3RvcnJlbnQiLA0KICAgICJjaGFubmVsIjoic2V0dGluZyINCn0=)"/> --> + <setting id="elementum_on_seed" type="bool" label="70805" default="false"/> + <setting id="elementum_install" type="action" label="70785" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJkb3dubG9hZCIsDQogICAgImNoYW5uZWwiOiJlbGVtZW50dW1fZG93bmxvYWQiDQp9)"/> </category> <!-- Search --> @@ -94,15 +95,14 @@ <category label="30153"> <setting id="downloadenabled" type="bool" label="70689" default="false"/> <setting id="downloadpath" type="folder" label="30017" visible="eq(-1,true)" 
default="special://profile/addon_data/plugin.video.kod/downloads/" option="writeable"/> - <setting id="settings_kod1" type="action" label="70789" action="" visible="eq(-2,true)"/> - <setting id="downloadlistpath" type="folder" label="30018" visible="eq(-3,true)" default="special://profile/addon_data/plugin.video.kod/downloads/list/" option="writeable"/> <!-- <setting id="library_add" type="bool" label="70230" default="false"/> <setting id="library_move" type="bool" label="70231" default="false" visible="eq(-1,true)" subsetting="true"/> --> - <setting id="library_move" type="bool" label="70231" visible="eq(-4,true)" default="true"/> - <setting id="browser" type="bool" label="70232" visible="eq(-5,true)" default="true"/> - <setting id="server_speed" type="bool" label="70242" visible="eq(-6,true)" default="true"/> - <setting id="quality" type="select" label="70240" lvalues="70241|70763|70764|70765" visible="eq(-7,true)" default="0"/> - <setting id="download_adv" type="action" label="30030" visible="eq(-8,true)" action="RunPlugin(plugin://plugin.video.kod/?ew0KCSJhY3Rpb24iOiJjaGFubmVsX2NvbmZpZyIsDQoJImNvbmZpZyI6ImRvd25sb2FkcyIsDQogICAgImNoYW5uZWwiOiJzZXR0aW5nIg0KfQ==)"/> + <setting id="downloadlistpath" type="folder" label="30018" visible="eq(-2,true)" default="special://profile/addon_data/plugin.video.kod/downloads/list/" option="writeable"/> + <setting id="library_move" type="bool" label="70231" visible="eq(-3,true)" default="true"/> + <setting id="browser" type="bool" label="70232" visible="eq(-4,true)" default="true"/> + <setting id="server_speed" type="bool" label="70242" visible="eq(-5,true)" default="true"/> + <setting id="quality" type="select" label="70240" lvalues="70241|70763|70764|70765" visible="eq(-6,true)" default="0"/> + <setting id="download_adv" type="action" label="30030" visible="eq(-7,true)" 
action="RunPlugin(plugin://plugin.video.kod/?ew0KCSJhY3Rpb24iOiJjaGFubmVsX2NvbmZpZyIsDQoJImNvbmZpZyI6ImRvd25sb2FkcyIsDQogICAgImNoYW5uZWwiOiJzZXR0aW5nIg0KfQ==)"/> </category> <!-- News --> @@ -126,26 +126,27 @@ <setting id="enable_link_menu" label="70527" type="bool" default="true"/> <setting id="enable_fav_menu" label="30102" type="bool" default="true"/> <setting id="enable_library_menu" label="30131" type="bool" default="true"/> - <!-- View Mode--> - <setting label="70754" type="lsep"/> + <setting label="30000" type="lsep"/> + <setting id="touch_view" label='30002' type="bool" default="false"/> + <!-- View Mode (hidden)--> <setting id="skin_name" label='Skin Name' type="text" default="skin.estuary" visible="false"/> - <setting id="view_mode_addon" type="action" label="70009" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6ImFkZG9uIg0KfQ==)" default= "Default, 0"/> - <setting id="view_mode_channel" type="action" label="30118" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6ImNoYW5uZWwiDQp9)" default= "Default, 0"/> - <setting id="view_mode_movie" type="action" label="30122" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6Im1vdmllIg0KfQ==)" default= "Default, 0"/> - <setting id="view_mode_tvshow" type="action" label="30123" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6InR2c2hvdyINCn0=)" default= "Default, 0"/> - <setting id="view_mode_season" type="action" label="30140" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6InNlYXNvbiINCn0=)" default= "Default, 0"/> - <setting id="view_mode_episode" type="action" label="70362" 
action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6ImVwaXNvZGUiDQp9)" default= "Default, 0"/> - <setting id="view_mode_server" type="action" label="70145" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiJ2aWV3X21vZGUiLA0KICAgICJjaGFubmVsIjoic2hvcnRjdXRzIiwNCgkidHlwZSI6InNlcnZlciINCn0=)" default= "Default, 0"/> + <setting id="view_mode_addon" type="action" label="70009" default= "Default, 0" visible="false"/> + <setting id="view_mode_channel" type="action" label="30118" default= "Default, 0" visible="false"/> + <setting id="view_mode_movie" type="action" label="30122" default= "Default, 0" visible="false"/> + <setting id="view_mode_tvshow" type="action" label="30123" default= "Default, 0" visible="false"/> + <setting id="view_mode_season" type="action" label="30140" default= "Default, 0" visible="false"/> + <setting id="view_mode_episode" type="action" label="70362" default= "Default, 0" visible="false"/> + <setting id="view_mode_server" type="action" label="70145" default= "Default, 0" visible="false"/> <!-- Contextual --> <setting label="30024" type="lsep"/> <setting id="quick_menu" type="bool" label="60360" default="true"/> <setting id="side_menu" type="bool" label="70737" default="false"/> - <setting id="kod_menu" type="bool" label="30025" default="true"/> + <setting id="kod_menu" type="bool" label="60026" default="true"/> <setting id="infoplus" type="bool" label="70151" default="false"/> <setting id="infoplus_set" type="bool" label="70128" visible="eq(-1,true)" default="false" subsetting="true"/> <setting id="extended_info" type="bool" label="70152" default="false"/> <!-- Shortcut --> - <setting label="70153" type="lsep"/> + <setting label="30998" type="lsep"/> <setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)"/> <setting id="delete_key" type="action" label="31000" 
action="RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJkZWxldGVfa2V5Igp9==)"/> <!-- Custom Start --> @@ -158,18 +159,9 @@ <!-- Others --> <setting label="70149" type="lsep"/> <setting id="icon_set" type="select" label="70108" values="default|light|dark|alfa|mike" default="default"/> + <setting id="only_channel_icons" type="bool" label="30009" default="false"/> <setting id="enable_custom_theme" type="bool" label="70564" default="false"/> <setting id="custom_theme" type="folder" label="70565" default="" visible="eq(-1,true)"/> <setting id="video_thumbnail_type" type="select" label="70131" lvalues="70132|70133" default="1"/> </category> - - <!-- Adult --> - <category label="60305"> - <setting id="adult_aux_intro_password" type="text" label="70113" option="hidden" default=""/> - <setting id="adult_mode" type="select" lvalues="60602|60616|70114" label="30002" enable="!eq(-1,)" default="0"/> - <setting id="adult_request_password" type="bool" label="70115" enable="!eq(-1,0)+!eq(-2,)" default="true"/> - <setting id="adult_aux_new_password1" type="text" label="70116" option="hidden" enable="!eq(-3,)" default=""/> - <setting id="adult_aux_new_password2" type="text" label="70117" option="hidden" enable="!eq(-1,)" default=""/> - </category> - </settings> diff --git a/resources/skins/Default/720p/ChannelSettings.xml b/resources/skins/Default/720p/ChannelSettings.xml index 82c3fb67..e1e5c565 100644 --- a/resources/skins/Default/720p/ChannelSettings.xml +++ b/resources/skins/Default/720p/ChannelSettings.xml @@ -2,113 +2,128 @@ <window> <allowoverlays>false</allowoverlays> <animation type="WindowOpen" reversible="false"> - <effect type="zoom" start="80" end="100" center="640,225" delay="160" tween="back" time="240" /> + <effect type="slide" start="0,200" end="0,0" center="640,225" delay="160" tween="cubic" time="200" /> <effect type="fade" delay="160" end="100" time="240" /> </animation> <animation type="WindowClose" reversible="false"> - <effect type="zoom" 
start="100" end="80" center="640,225" easing="in" tween="back" time="240" /> + <effect type="slide" start="0,0" end="0,200" center="640,225" easing="in" tween="cubic" time="200" /> <effect type="fade" start="100" end="0" time="240" /> </animation> <controls> + <!-- Background --> <control type="group" id="10001"> - <posx>240</posx> - <posy>110</posy> - <width>800</width> - <height>500</height> + <left>0</left> + <top>0</top> + <width>100%</width> + <height>100%</height> <control type="image"> - <width>800</width> - <height>500</height> - <texture>Shortcut/dialog-bg-solid.png</texture> + <width>100%</width> + <height>100%</height> + <texture colordiffuse="FF232323">Shortcut/white.png</texture> </control> + <!-- Divider --> + <control type="image"> + <top>80</top> + <right>0</right> + <width>100%</width> + <height>1</height> + <texture colordiffuse="33FFFFFF">Shortcut/white.png</texture> + </control> + <!-- Header --> <control type="label" id="10002"> - <posy>15</posy> - <posx>50</posx> - <height>34</height> - <width>725</width> - <font>font12_title</font> + <top>20</top> + <left>70</left> + <height>40</height> + <width>80%</width> + <font>font30_title</font> <textcolor>FFFFFFFF</textcolor> - <align>center</align> + <align>left</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70000]</label> </control> + <!-- CLOSE BUTTON --> <control type="button" id="10003"> - <posx>745</posx> - <posy>25</posy> - <width>20</width> - <height>20</height> - <texturefocus>Controls/DialogCloseButton-focus.png</texturefocus> - <texturenofocus>Controls/DialogCloseButton.png</texturenofocus> + <right>30</right> + <top>25</top> + <width>30</width> + <height>30</height> + <texturefocus colordiffuse="FFFFFFFF">Controls/DialogCloseButton-focus.png</texturefocus> + <texturenofocus colordiffuse="55FFFFFF">Controls/DialogCloseButton-focus.png</texturenofocus> </control> + <!-- OK --> <control type="button" id="10004"> - <posx>40</posx> - <posy>410</posy> + <left>60</left> + 
<top>120</top> <width>200</width> - <height>50</height> + <height>60</height> <textwidth>110</textwidth> <textcolor>FFFFFFFF</textcolor> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="FF191919">Shortcut/white.png</texturenofocus> <align>center</align> - <aligny>center</aligny> + <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70001]</label> </control> + <!-- CANCEL --> <control type="button" id="10005"> - <posx>300</posx> - <posy>410</posy> + <left>60</left> + <top>190</top> <width>200</width> - <height>50</height> + <height>60</height> <textwidth>110</textwidth> <textcolor>FFFFFFFF</textcolor> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="FF191919">Shortcut/white.png</texturenofocus> <align>center</align> - <aligny>center</aligny> + <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70002]</label> </control> + <!-- DEFAULT --> <control type="button" id="10006"> - <posx>560</posx> - <posy>410</posy> + <left>60</left> + <top>260</top> <width>200</width> - <height>50</height> + <height>60</height> <textwidth>110</textwidth> <textcolor>FFFFFFFF</textcolor> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="FF191919">Shortcut/white.png</texturenofocus> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70003]</label> </control> - <control type="group" id="10007"> - <posy>80</posy> - <posx>20</posx> - <width>745</width> - <height>387</height> + <!-- Parameters --> + <control type="group" id="10007"> + <top>120</top> 
+ <left>300</left> + <width>900</width> + <height>680</height> <control type="label" id="100010"> - <posy>30</posy> - <posx>0</posx> - <width>745</width> - <height>300</height> - <font>font16</font> + <top>0</top> + <left>0</left> + <width>900</width> + <height>680</height> + <font>font20</font> <textcolor>FFFFFFFF</textcolor> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70004]</label> </control> </control> + <!-- Scrollbar --> <control type="image" id="10008"> - <posy>80</posy> - <posx>780</posx> + <top>120</top> + <right>0</right> <width>10</width> - <height>300</height> - <textcolor>FFFFFFFF</textcolor> - <texture>Controls/ScrollBack.png</texture> + <height>540</height> + <texture border="2" colordiffuse="FF232323">Shortcut/white.png</texture> </control> <control type="image" id="10009"> - <posy>80</posy> - <posx>780</posx> + <top>120</top> + <right>0</right> <width>10</width> - <height>300</height> - <texture>Controls/ScrollBar.png</texture> + <height>540</height> + <texture border="2" colordiffuse="33FFFFFF">Shortcut/white.png</texture> </control> </control> </controls> diff --git a/resources/skins/Default/720p/DarkShortCutMenu.xml b/resources/skins/Default/720p/DarkShortCutMenu.xml index fb98c257..9ba71b24 100644 --- a/resources/skins/Default/720p/DarkShortCutMenu.xml +++ b/resources/skins/Default/720p/DarkShortCutMenu.xml @@ -7,12 +7,10 @@ </coordinates> <defaultcontrol always="true">32500</defaultcontrol> <animation type="WindowOpen" reversible="false"> - <effect type="zoom" start="80" end="100" center="440,110" delay="160" tween="back" time="240" /> - <effect type="fade" delay="160" end="100" time="240" /> + <effect type="fade" delay="160" end="100" time="220" /> </animation> <animation type="WindowClose" reversible="false"> - <effect type="zoom" start="100" end="80" center="440,110" easing="in" tween="back" time="240" /> - <effect type="fade" start="100" end="0" time="240" /> + <effect type="fade" delay="160" start="100" 
end="0" time="200" /> </animation> <controls> <control type="button"> @@ -24,14 +22,22 @@ <texturenofocus colordiffuse="88FFFFFF">Shortcut/white.png</texturenofocus> <onclick>Action(close)</onclick> </control> - <control type="image"> - <left>0</left> - <top>0</top> - <width>880</width> - <height>220</height> - <texture border="2" colordiffuse="FFEEEEEE">Shortcut/white.png</texture> - </control> <control type="group"> + <animation type="WindowOpen" reversible="false"> + <effect type="slide" start="0,100" end="0,0" center="640,225" delay="160" tween="cubic" time="200" /> + <effect type="fade" delay="160" end="100" time="220" /> + </animation> + <animation type="WindowClose" reversible="false"> + <effect type="slide" start="0,0" end="0,-100" center="640,225" delay="160" tween="cubic" time="200" /> + <effect type="fade" delay="160" start="100" end="0" time="200" /> + </animation> + <control type="image"> + <left>0</left> + <top>0</top> + <width>880</width> + <height>220</height> + <texture border="2" colordiffuse="FFEEEEEE">Shortcut/white.png</texture> + </control> <control type="group"> <control type="image"> <left>-21</left> @@ -53,7 +59,6 @@ <animation effect="slide" start="0,0" end="10,0" time="0" condition="true">Conditional</animation> <animation effect="slide" end="120,0" time="0" condition="!Control.IsVisible(5)">Conditional</animation> <itemlayout height="220" width="220"> - <!-- <top>2</top> --> <control type="image"> <top>0</top> <left>0</left> @@ -69,11 +74,9 @@ <texture colordiffuse="60FFFFFF">Shortcut/black.png</texture> </control> <control type="image"> - <left>35</left> - <top>35</top> - <width>150</width> - <height>150</height> - <texture>$INFO[ListItem.Property(thumb)]</texture> + <width>220</width> + <height>220</height> + <texture>$INFO[ListItem.Property(thumbnail)]</texture> <aspectratio>keep</aspectratio> <align>center</align> </control> @@ -97,8 +100,9 @@ <animation effect="fade" start="100" end="0" time="0">Focus</animation> </control> <control 
type="image"> + <top>1</top> <width>220</width> - <height>220</height> + <height>218</height> <texture colordiffuse="FF0082C2">Shortcut/button-fo.png</texture> <animation effect="fade" start="100" end="0" time="0">Unfocus</animation> </control> @@ -118,24 +122,32 @@ <texture colordiffuse="60FFFFFF">Shortcut/black.png</texture> </control> <control type="image"> - <left>35</left> - <top>35</top> - <width>150</width> - <height>150</height> - <texture>$INFO[ListItem.Property(thumb)]</texture> + <width>220</width> + <height>220</height> + <texture>$INFO[ListItem.Property(thumbnail)]</texture> <aspectratio>keep</aspectratio> <align>center</align> </control> <control type="textbox"> <left>0</left> - <top>146</top> + <top>160</top> <width>220</width> <height>74</height> <font>font12</font> + <textcolor>FF232323</textcolor> <label>$INFO[ListItem.Label]</label> <align>center</align> <aligny>center</aligny> </control> + <control type="image"> + <left>100</left> + <top>5</top> + <width>20</width> + <height>20</height> + <texture colordiffuse="FF232323">$INFO[ListItem.Property(sub)]</texture> + <aspectratio>keep</aspectratio> + <align>center</align> + </control> </control> </focusedlayout> </control> @@ -151,10 +163,8 @@ <textureslidernibfocus>-</textureslidernibfocus> <showonepage>false</showonepage> <orientation>horizontal</orientation> - <onleft>32500</onleft> - <onright>32500</onright> <ondown>32500</ondown> - <onup>32500</onup> + <!-- <onup>32500</onup> --> <animation effect="slide" end="120,0" time="0" condition="!Control.IsVisible(5)">Conditional</animation> </control> </control> diff --git a/resources/skins/Default/720p/InfoWindow.xml b/resources/skins/Default/720p/InfoWindow.xml index 6998dad6..5c10d776 100644 --- a/resources/skins/Default/720p/InfoWindow.xml +++ b/resources/skins/Default/720p/InfoWindow.xml @@ -1,98 +1,113 @@ <?xml version="1.0" encoding="utf-8"?> <window> - <allowoverlays>false</allowoverlays> + <allowoverlays>false</allowoverlays> + <animation 
type="WindowOpen" reversible="false"> + <effect type="slide" start="0,200" end="0,0" center="640,225" delay="160" tween="cubic" time="200" /> + <effect type="fade" delay="160" end="100" time="240" /> + </animation> + <animation type="WindowClose" reversible="false"> + <effect type="slide" start="0,0" end="0,200" center="640,225" easing="in" tween="cubic" time="200" /> + <effect type="fade" start="100" end="0" time="240" /> + </animation> <controls> <control type="group" id="10001"> - <posx>50</posx> - <posy>28</posy> - <width>1179</width> - <height>663</height> + <left>0</left> + <top>0</top> + <width>100%</width> + <height>100%</height> <control type="image"> - <width>1179</width> - <height>663</height> - <texture>Shortcut/dialog-bg-solid.png</texture> + <width>100%</width> + <height>100%</height> + <texture colordiffuse="FF232323">Shortcut/white.png</texture> </control> - <!-- <control type="image"> - <height>40</height> - <width>800</width> - <texture>Windows/dialogheader.png</texture> - </control> --> + <!-- FANART --> + <control type="image" id="10004"> + <left>0</left> + <top>0</top> + <width>100%</width> + <height>100%</height> + <texture></texture> + <colordiffuse>33FFFFFF</colordiffuse> + </control> + <!-- Header --> <control type="label" id="10002"> - <posy>15</posy> - <posx>0</posx> + <top>20</top> + <left>0</left> <height>34</height> - <width>1179</width> - <font>font12_title</font> - <textcolor>0xFFFFFFFF</textcolor> - <!-- <shadowcolor>black</shadowcolor> --> + <width>100%</width> + <font>font30_title</font> + <textcolor>FFFFFFFF</textcolor> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70000]</label> </control> + <!-- Close --> <control type="button" id="10003"> - <posx>1120</posx> - <posy>25</posy> - <width>20</width> - <height>20</height> - <texturefocus>Controls/DialogCloseButton-focus.png</texturefocus> - <texturenofocus>Controls/DialogCloseButton.png</texturenofocus> + <right>30</right> + <top>25</top> + 
<width>30</width> + <height>30</height> + <texturefocus colordiffuse="FFFFFFFF">Controls/DialogCloseButton-focus.png</texturefocus> + <texturenofocus colordiffuse="55FFFFFF">Controls/DialogCloseButton-focus.png</texturenofocus> </control> - <control type="image" id="10004"> <!-- FANART --> - <posx>0</posx> - <posy>0</posy> - <width>1179</width> - <height>663</height> + <!-- LOCANDINA --> + <control type="image" id="10005"> + <right>30</right> + <top>110</top> + <width>349</width> + <height>500</height> <texture></texture> - <colordiffuse>33FFFFFF</colordiffuse> + <animation type="WindowOpen" reversible="false"> + <effect type="slide" start="200,0" end="0,0" delay="300" tween="cubic" time="200" /> + <effect type="fade" delay="300" end="100" time="240" /> + </animation> + <animation type="WindowClose" reversible="false"> + <effect type="slide" start="0,0" end="200,0" easing="in" tween="cubic" time="200" /> + <effect type="fade" start="100" end="0" time="240" /> + </animation> </control> - <control type="image" id="10005"> <!-- LOCANDINA --> - <posx>809</posx> - <posy>110</posy> - <width>310</width> - <height>444</height> - <texture></texture> - </control> - <control type="label" id="10006"> <!-- Serie --> - <posy>100</posy> - <posx>60</posx> + <!-- Serie --> + <control type="label" id="10006"> + <top>100</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> - <textcolor>0xFFFFFFFF</textcolor> + <textcolor>FFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="10007"> - <posy>100</posy> - <posx>200</posx> + <top>100</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> <scrollsuffix> | </scrollsuffix> <scroll>true</scroll> - <textcolor>0xFFFFFFFF</textcolor> + <textcolor>FFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> 
+ <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="10008"> <!-- Lingua Originale --> - <posy>120</posy> - <posx>60</posx> + <top>125</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="10009"> - <posy>120</posy> - <posx>200</posx> + <top>125</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -100,25 +115,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100010"> <!-- Punteggio --> - <posy>140</posy> - <posx>60</posx> + <top>150</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100011"> - <posy>140</posy> - <posx>200</posx> + <top>150</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -126,25 +141,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100012"> <!-- Generi --> - <posy>160</posy> - <posx>60</posx> + <top>175</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" 
id="100013"> - <posy>160</posy> - <posx>200</posx> + <top>175</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -152,25 +167,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100014"> - <posy>180</posy> - <posx>60</posx> + <top>200</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100015"> - <posy>180</posy> - <posx>200</posx> + <top>200</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -178,25 +193,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100016"> - <posy>200</posy> - <posx>60</posx> + <top>225</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100017"> - <posy>200</posy> - <posx>200</posx> + <top>225</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -204,25 +219,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100018"> - <posy>220</posy> - <posx>60</posx> + <top>250</top> + <left>60</left> <height>40</height> 
<width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100019"> - <posy>220</posy> - <posx>200</posx> + <top>250</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -230,25 +245,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100020"> - <posy>240</posy> - <posx>60</posx> + <top>275</top> + <left>60</left> <height>40</height> <width>100</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100021"> - <posy>240</posy> - <posx>200</posx> + <top>275</top> + <left>200</left> <height>40</height> <width>365</width> <font>font10</font> @@ -256,25 +271,25 @@ <scroll>true</scroll> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="label" id="100022"> <!-- Trama --> - <posy>260</posy> - <posx>60</posx> + <top>300</top> + <left>60</left> <height>40</height> <width>505</width> <font>font10</font> <textcolor>0xFFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> - <align>posx</align> - <aligny>posy</aligny> + <align>left</align> + <aligny>top</aligny> <label></label> </control> <control type="textbox" id="100023"> - <posy>260</posy> - <posx>200</posx> + <top>300</top> + <left>200</left> <height>294</height> <width>500</width> <font>font10</font> @@ -283,71 +298,71 @@ <wrapmultiline>true</wrapmultiline> <autoscroll delay="2000" 
time="1500" repeat="2000">true</autoscroll> <align>justify</align> - <aligny>posy</aligny> + <aligny>top</aligny> <label></label> </control> <control type="group" id="10024"> - <posy>600</posy> - <posx>40</posx> + <bottom>60</bottom> + <left>40</left> <width>760</width> <height>30</height> <control type="button" id="10025"> - <posy>0</posy> - <posx>0</posx> + <top>0</top> + <left>0</left> <width>200</width> <height>50</height> <textwidth>110</textwidth> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="000081C2">Shortcut/white.png</texturenofocus> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70005]</label> </control> <control type="button" id="10026"> - <posy>0</posy> - <posx>210</posx> + <top>0</top> + <left>210</left> <width>200</width> <height>50</height> <textwidth>110</textwidth> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="000081C2">Shortcut/white.png</texturenofocus> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70006]</label> </control> <control type="button" id="10027"> - <posy>0</posy> - <posx>420</posx> + <top>0</top> + <left>420</left> <width>200</width> <height>50</height> <textwidth>110</textwidth> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="000081C2">Shortcut/white.png</texturenofocus> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70002]</label> </control> <control type="button" id="10028"> - <posy>0</posy> - <posx>640</posx> + 
<top>0</top> + <left>640</left> <width>200</width> <height>50</height> <textwidth>110</textwidth> - <texturefocus>Controls/KeyboardKey.png</texturefocus> - <texturenofocus>Controls/KeyboardKeyNF.png</texturenofocus> + <texturefocus colordiffuse="FF0081C2">Shortcut/white.png</texturefocus> + <texturenofocus colordiffuse="000081C2">Shortcut/white.png</texturenofocus> <align>center</align> <aligny>center</aligny> <label>$ADDON[plugin.video.kod 70007]</label> </control> <control type="label" id="100029"> - <posy>10</posy> - <posx>1080</posx> + <top>10</top> + <left>1080</left> <height>30</height> <width>110</width> <font>font10</font> - <textcolor>0xFFFFFFFF</textcolor> + <textcolor>FFFFFFFF</textcolor> <shadowcolor>black</shadowcolor> <align>right</align> <aligny>center</aligny> diff --git a/resources/skins/Default/720p/ShortCutMenu.xml b/resources/skins/Default/720p/ShortCutMenu.xml index ced2cc5e..d36e2ab4 100644 --- a/resources/skins/Default/720p/ShortCutMenu.xml +++ b/resources/skins/Default/720p/ShortCutMenu.xml @@ -7,12 +7,10 @@ </coordinates> <defaultcontrol always="true">32500</defaultcontrol> <animation type="WindowOpen" reversible="false"> - <effect type="zoom" start="80" end="100" center="440,110" delay="160" tween="back" time="240" /> - <effect type="fade" delay="160" end="100" time="240" /> + <effect type="fade" delay="160" end="100" time="220" /> </animation> <animation type="WindowClose" reversible="false"> - <effect type="zoom" start="100" end="80" center="440,110" easing="in" tween="back" time="240" /> - <effect type="fade" start="100" end="0" time="240" /> + <effect type="fade" delay="160" start="100" end="0" time="200" /> </animation> <controls> <control type="button"> @@ -24,14 +22,22 @@ <texturenofocus colordiffuse="88232323">Shortcut/white.png</texturenofocus> <onclick>Action(close)</onclick> </control> - <control type="image"> - <left>0</left> - <top>0</top> - <width>880</width> - <height>220</height> - <texture border="2" 
colordiffuse="FF232323">Shortcut/white.png</texture> - </control> <control type="group"> + <animation type="WindowOpen" reversible="false"> + <effect type="slide" start="0,100" end="0,0" center="640,225" delay="160" tween="cubic" time="200" /> + <effect type="fade" delay="160" end="100" time="220" /> + </animation> + <animation type="WindowClose" reversible="false"> + <effect type="slide" start="0,0" end="0,-100" center="640,225" delay="160" tween="cubic" time="200" /> + <effect type="fade" delay="160" start="100" end="0" time="200" /> + </animation> + <control type="image"> + <left>0</left> + <top>0</top> + <width>880</width> + <height>220</height> + <texture border="2" colordiffuse="FF232323">Shortcut/white.png</texture> + </control> <control type="group"> <control type="image"> <left>-21</left> @@ -53,7 +59,6 @@ <animation effect="slide" start="0,0" end="10,0" time="0" condition="true">Conditional</animation> <animation effect="slide" end="120,0" time="0" condition="!Control.IsVisible(5)">Conditional</animation> <itemlayout height="220" width="220"> - <!-- <top>2</top> --> <control type="image"> <top>0</top> <left>0</left> @@ -71,7 +76,7 @@ <control type="image"> <width>220</width> <height>220</height> - <texture>$INFO[ListItem.Property(thumb)]</texture> + <texture>$INFO[ListItem.Property(thumbnail)]</texture> <aspectratio>keep</aspectratio> <align>center</align> </control> @@ -119,7 +124,7 @@ <control type="image"> <width>220</width> <height>220</height> - <texture>$INFO[ListItem.Property(thumb)]</texture> + <texture>$INFO[ListItem.Property(thumbnail)]</texture> <aspectratio>keep</aspectratio> <align>center</align> </control> @@ -134,6 +139,15 @@ <align>center</align> <aligny>center</aligny> </control> + <control type="image"> + <left>100</left> + <top>5</top> + <width>20</width> + <height>20</height> + <texture>$INFO[ListItem.Property(sub)]</texture> + <aspectratio>keep</aspectratio> + <align>center</align> + </control> </control> </focusedlayout> </control> 
@@ -149,10 +163,8 @@ <textureslidernibfocus>-</textureslidernibfocus> <showonepage>false</showonepage> <orientation>horizontal</orientation> - <!-- <onleft>32500</onleft> - <onright>32500</onright> --> <ondown>32500</ondown> - <onup>32500</onup> + <!-- <onup>32500</onup> --> <animation effect="slide" end="120,0" time="0" condition="!Control.IsVisible(5)">Conditional</animation> </control> </control> diff --git a/resources/views/skin.ace2.json b/resources/views/skin.ace2.json deleted file mode 100644 index 088ebcac..00000000 --- a/resources/views/skin.ace2.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "all":{ - "Right List": 50, - "Frame": 51, - "Wall": 56, - "Tech": 68, - "Sets Plus": 74 - }, - "movie":{ - "Right List": 50, - "Frame": 51, - "Wall": 56, - "Tech": 68, - "Sets Plus": 74 - }, - "tvshow":{ - "Right List": 50, - "Frame": 51, - "Wall": 56, - "Tech": 68, - "Sets Plus": 74 - }, - "season":{ - "Right List": 50, - "Frame": 51, - "Wall": 56, - "Tech": 68, - "Sets Plus": 74 - }, - "episode":{ - "Right List": 50, - "Frame": 51, - "Wall": 56, - "Tech": 68, - "Sets Plus": 74 - }, - "addon":{ - "Right List": 50, - "Frame": 51, - "Wall": 56, - "Tech": 68, - "Sets Plus": 74 - } -} \ No newline at end of file diff --git a/resources/views/skin.aeon.nox.silvo.json b/resources/views/skin.aeon.nox.silvo.json deleted file mode 100644 index 3e777a57..00000000 --- a/resources/views/skin.aeon.nox.silvo.json +++ /dev/null @@ -1,109 +0,0 @@ -{ - "all":{ - "List": 50, - "RightList": 506, - "FanartList": 602, - "LowList": 501, - "BigList": 507, - "Logo": 59, - "Showcase": 53, - "Shift": 57, - "Posters": 56, - "MyFlix": 509, - "TriPanel": 55, - "Icons": 500, - "Small Icons": 499, - "Wall": 503, - "Fullscreen Wall": 609, - "infoWall": 51, - "BannerWall": 58, - "BannerPlex": 601, - "Landscape": 52, - "BigFan": 591, - "Gallery": 504, - "Panel": 505, - "Episode": 502 - }, - "movie":{ - "List": 50, - "RightList": 506, - "FanartList": 602, - "LowList": 501, - "BigList": 507, - "Logo": 59, - 
"Showcase": 53, - "Shift": 57, - "Posters": 56, - "MyFlix": 509, - "TriPanel": 55, - "Icons": 500, - "Small Icons": 499, - "Wall": 503, - "Fullscreen Wall": 609, - "infoWall": 51, - "BannerWall": 58, - "BannerPlex": 601, - "Landscape": 52, - "BigFan": 591, - "Gallery": 504, - "Panel": 505 - }, - "tvshow":{ - "List": 50, - "RightList": 506, - "FanartList": 602, - "LowList": 501, - "BigList": 507, - "Logo": 59, - "Showcase": 53, - "Shift": 57, - "Posters": 56, - "MyFlix": 509, - "TriPanel": 55, - "Icons": 500, - "Small Icons": 499, - "Wall": 503, - "Fullscreen Wall": 609, - "infoWall": 51, - "BannerWall": 58, - "BannerPlex": 601, - "Landscape": 52, - "BigFan": 591, - "Gallery": 504, - "Panel": 505, - "Episode": 502 - }, - "season":{ - "List": 50, - "RightList": 506, - "FanartList": 602, - "LowList": 501, - "Showcase": 53, - "Shift": 57, - "Posters": 56, - "TriPanel": 55, - "Icons": 500, - "Small Icons": 499, - "Wall": 503, - "BigFan": 591 - }, - "episode":{ - "List": 50, - "RightList": 506, - "FanartList": 602, - "LowList": 501, - "BigList": 507, - "Episode": 502, - "MyFlix": 509, - "TriPanel": 55, - "Icons": 500, - "Landscape": 52, - "Panel": 505 - }, - "addon":{ - "List": 50, - "Shift": 57, - "Icons": 500, - "Wall": 503 - } -} \ No newline at end of file diff --git a/resources/views/skin.aeon.tajo.json b/resources/views/skin.aeon.tajo.json deleted file mode 100644 index ad06eaaf..00000000 --- a/resources/views/skin.aeon.tajo.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "all":{ - "List": 50, - "MultiSort": 589, - "MultiWall": 587, - "Shelf 3D": 595, - "List 3D": 596, - "Fanart 3D": 593, - "Triple 3D": 590, - "Wall 3D": 586, - "Banner 3D": 594, - "MultiWall 3D": 587 - }, - "movie":{ - "List": 50, - "MultiSort": 589, - "MultiWall": 587, - "Shelf 3D": 595, - "List 3D": 596, - "Fanart 3D": 593, - "Triple 3D": 590, - "Wall 3D": 586, - "Banner 3D": 594, - "MultiWall 3D": 587 - }, - "tvshow":{ - "List": 50, - "MultiWall": 587, - "Shelf 3D": 595, - "List 3D": 596, - "Fanart 
3D": 593, - "Triple 3D": 590, - "Wall 3D": 586, - "Banner 3D": 594, - "MultiWall 3D": 587 - }, - "season":{ - "List": 50, - "Shelf 3D": 595, - "List 3D": 596, - "Wall 3D": 586 - }, - "episode":{ - "List": 50, - "List 3D": 596 - }, - "addon":{ - "List": 50 - } -} \ No newline at end of file diff --git a/resources/views/skin.aeonmq8.json b/resources/views/skin.aeonmq8.json deleted file mode 100644 index e0ce9573..00000000 --- a/resources/views/skin.aeonmq8.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "all":{ - "Right List": 51, - "Banner": 52, - "Banner Info": 53, - "Poster": 54, - "Low List": 55, - "Wall": 56, - "Glass List": 57, - "Coverflow": 58, - "Wide": 59, - "Showart": 60, - "Multiplex": 61, - "Panel": 62, - "Shelf": 63, - "Sets": 65, - "Showcase": 66, - "Wall 3D": 67, - "Posterlite": 80, - "Poster List": 70, - "Total Art": 71 - }, - "movie":{ - "Right List": 51, - "Banner": 52, - "Banner Info": 53, - "Poster": 54, - "Low List": 55, - "Wall": 56, - "Glass List": 57, - "Coverflow": 58, - "Wide": 59, - "Showart": 60, - "Multiplex": 61, - "Panel": 62, - "Shelf": 63, - "Sets": 65, - "Showcase": 66, - "Wall 3D": 67, - "Posterlite": 80, - "Poster List": 70, - "Total Art": 71 - }, - "tvshow":{ - "Right List": 51, - "Banner": 52, - "Banner Info": 53, - "Poster": 54, - "Low List": 55, - "Wall": 56, - "Glass List": 57, - "Coverflow": 58, - "Wide": 59, - "Showart": 60, - "Multiplex": 61, - "Panel": 62, - "Shelf": 63, - "Showcase": 66, - "Wall 3D": 67, - "Posterlite": 80 - }, - "season":{ - "Right List": 51, - "Banner": 52, - "Banner Info": 53, - "Poster": 54, - "Low List": 55, - "Wall": 56, - "Glass List": 57, - "Coverflow": 58, - "Wide": 59, - "Showart": 60, - "Multiplex": 61, - "Panel": 62, - "Shelf": 63, - "Showcase": 66, - "Wall 3D": 67, - "Posterlite": 80 - }, - "episode":{ - "Right List": 51, - "Poster": 54, - "Low List": 55, - "Wall": 56, - "Glass List": 57, - "Wide": 59, - "Showart": 60, - "Multiplex": 61, - "Panel": 62, - "Wall 3D": 67, - "Posterlite": 80 - }, - 
"addon":{ - "Right List": 51 - } -} \ No newline at end of file diff --git a/resources/views/skin.amber.json b/resources/views/skin.amber.json deleted file mode 100644 index dafb4913..00000000 --- a/resources/views/skin.amber.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "all":{ - "List": 50, - "Low List": 54, - "Big List": 52, - "Tall List": 501, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 56 - }, - "movie":{ - "List": 50, - "Low List": 54, - "Big List": 52, - "Tall List": 501, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 56 - }, - "tvshow":{ - "List": 50, - "Low List": 54, - "Big List": 52, - "Tall List": 501, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 56 - }, - "season":{ - "List": 50, - "Low List": 54, - "Big List": 52, - "Tall List": 501, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 56 - }, - "episode":{ - "List": 50, - "Low List": 54, - "Big List": 52, - "Tall List": 501, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 56 - }, - "addon":{ - "List": 50, - "Low List": 54, - "Big List": 52, - "Tall List": 501, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 56 - } -} \ No newline at end of file diff --git a/resources/views/skin.apptv.json b/resources/views/skin.apptv.json deleted file mode 100644 index a7e8373a..00000000 --- a/resources/views/skin.apptv.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "all":{ - "List": 50, - "Icons": 52, - "Banner": 53, - "Wrap": 54, - "Coverflow": 57, - "Wall": 58 - }, - "movie":{ - "List": 50, - "Icons": 52, - "Wrap": 54, - "Coverflow": 57, - "Wall": 58 - }, - "tvshow":{ - "List": 50, - "Icons": 52, - "Banner": 53, - "Wrap": 54, - "Coverflow": 57, - "Wall": 58 - }, - "season":{ - "List": 50, - "Icons": 52, - "Wrap": 54, - "Coverflow": 57, - "Wall": 58 - }, - "episode":{ - "List": 50, - "Icons": 52, - "Wrap": 54, - "Coverflow": 57 - }, - "addon":{ - "List": 50, - "Icons": 52, - "Wrap": 54, - "Coverflow": 57, - "Wall": 58 - } -} \ No newline at end 
of file diff --git a/resources/views/skin.arctic.zephyr.2.json b/resources/views/skin.arctic.zephyr.2.json deleted file mode 100644 index 4c3e2e13..00000000 --- a/resources/views/skin.arctic.zephyr.2.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "all":{ - "List": 50, - "MediaInfo": 501, - "MediaInfo 2": 502, - "Tri-Panel": 504, - "Banner List": 503, - "Poster Wall": 51, - "Landscape Wall": 511, - "Info Wall": 514, - "Info Wall 2": 516, - "Landscape Wall Small": 515, - "Poster Showcase": 52, - "Landscape Showcase": 521, - "Poster": 53, - "Lovefilm": 523, - "Seasons Info": 522, - "Seasons Info 2": 524, - "Square Wall": 510, - "Icon Wall": 512 - }, - "movie":{ - "List": 50, - "MediaInfo": 501, - "MediaInfo 2": 502, - "Tri-Panel": 504, - "Banner List": 503, - "Poster Wall": 51, - "Landscape Wall": 511, - "Info Wall": 514, - "Info Wall 2": 516, - "Landscape Wall Small": 515, - "Poster Showcase": 52, - "Landscape Showcase": 521, - "Poster": 53, - "Lovefilm": 523 - }, - "tvshow":{ - "List": 50, - "MediaInfo": 501, - "MediaInfo 2": 502, - "Tri-Panel": 504, - "Banner List": 503, - "Poster Wall": 51, - "Landscape Wall": 511, - "Info Wall": 514, - "Info Wall 2": 516, - "Landscape Wall Small": 515, - "Poster Showcase": 52, - "Landscape Showcase": 521, - "Poster": 53, - "Lovefilm": 523 - }, - "season":{ - "List": 50, - "Banner List": 503, - "Poster Wall": 51, - "Info Wall": 514, - "Info Wall 2": 516, - "Poster Showcase": 52, - "Seasons Info": 522, - "Seasons Info 2": 524, - "Poster": 53 - }, - "episode":{ - "List": 50, - "MediaInfo": 501, - "MediaInfo 2": 502, - "Tri-Panel": 504, - "Banner List": 503, - "Landscape Wall": 511, - "Info Wall 2": 516, - "Landscape Wall Small": 515, - "Landscape Showcase": 521 - }, - "addon":{ - "List": 50, - "Square Wall": 510, - "Icon Wall": 512 - } -} \ No newline at end of file diff --git a/resources/views/skin.aura.json b/resources/views/skin.aura.json deleted file mode 100644 index b106fe37..00000000 --- a/resources/views/skin.aura.json +++ 
/dev/null @@ -1,78 +0,0 @@ -{ - "all":{ - "List": 50, - "Poster Wall": 51, - "Landscape Wall": 52, - "Square Wall": 53, - "Banner Wall": 54, - "Showcase": 55, - "Landscape Showcase": 56, - "Square Showcase": 57, - "Big Posters": 58, - "Lovefilm": 59, - "Media Info": 500, - "info List": 501, - "Episode List": 502, - "Square Wall Large": 503 - }, - "movie":{ - "List": 50, - "Poster Wall": 51, - "Landscape Wall": 52, - "Showcase": 55, - "Landscape Showcase": 56, - "Square Showcase": 57, - "Big Posters": 58, - "Lovefilm": 59, - "Media Info": 500, - "info List": 501, - "Episode List": 502 - }, - "tvshow":{ - "List": 50, - "Poster Wall": 51, - "Landscape Wall": 52, - "Banner Wall": 54, - "Showcase": 55, - "Landscape Showcase": 56, - "Square Showcase": 57, - "Big Posters": 58, - "Lovefilm": 59, - "Media Info": 500, - "info List": 501, - "Episode List": 502 - }, - "season":{ - "List": 50, - "Poster Wall": 51, - "Landscape Wall": 52, - "Banner Wall": 54, - "Showcase": 55, - "Landscape Showcase": 56, - "Square Showcase": 57, - "Big Posters": 58, - "Lovefilm": 59, - "Media Info": 500, - "info List": 501, - "Episode List": 502 - }, - "episode":{ - "List": 50, - "Poster Wall": 51, - "Landscape Wall": 52, - "Banner Wall": 54, - "Showcase": 55, - "Landscape Showcase": 56, - "Square Showcase": 57, - "Big Posters": 58, - "Lovefilm": 59, - "Media Info": 500, - "info List": 501, - "Episode List": 502 - }, - "addon":{ - "List": 50, - "Square Wall": 53, - "Square Wall Large": 503 - } -} \ No newline at end of file diff --git a/resources/views/skin.bello.7.json b/resources/views/skin.bello.7.json deleted file mode 100644 index 992a8ffc..00000000 --- a/resources/views/skin.bello.7.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "all":{ - "List": 50, - "Bello": 66, - "Slide": 51, - "Gallery": 52, - "Tiles": 562, - "Thumbnails": 560, - "Posters": 561, - "Posters 2": 64, - "Banner List": 57, - "3D Wall": 53, - "Wall": 54, - "List Wall": 58, - "VideoWall": 580, - "Showtime": 65, - "Netflix": 59 - 
}, - "movie":{ - "List": 50, - "Bello": 66, - "Slide": 51, - "Gallery": 52, - "Tiles": 562, - "Thumbnails": 560, - "Posters": 561, - "Posters 2": 64, - "Banner List": 57, - "3D Wall": 53, - "Wall": 54, - "List Wall": 58, - "VideoWall": 580, - "Showtime": 65, - "Netflix": 59 - }, - "tvshow":{ - "List": 50, - "Bello": 66, - "Slide": 51, - "Gallery": 52, - "Tiles": 562, - "Thumbnails": 560, - "Posters 2": 64, - "Banner List": 57, - "3D Wall": 53, - "Wall": 54, - "List Wall": 58, - "VideoWall": 580, - "Showtime": 65, - "Netflix": 59 - }, - "season":{ - "List": 50, - "Bello": 66, - "Slide": 51, - "Gallery": 52, - "Tiles": 562, - "Thumbnails": 560, - "Banner List": 57, - "Netflix": 59 - }, - "episode":{ - "List": 50, - "Bello": 66, - "Gallery": 52, - "Tiles": 562, - "Thumbnails": 560, - "Banner List": 57, - "Netflix": 59 - }, - "addon":{ - "List": 50, - "Bello": 66, - "Thumbnails": 560 - } -} \ No newline at end of file diff --git a/resources/views/skin.blackglassnova.json b/resources/views/skin.blackglassnova.json deleted file mode 100644 index d59b9ed2..00000000 --- a/resources/views/skin.blackglassnova.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "all":{ - "List": 50, - "Large List": 52, - "Low List": 54, - "Banner List": 56, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Poster Showcase": 59, - "Card List": 5050 - }, - "movie":{ - "List": 50, - "Large List": 52, - "Low List": 54, - "Banner List": 56, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Poster Showcase": 59, - "Card List": 5050 - }, - "tvshow":{ - "List": 50, - "Large List": 52, - "Low List": 54, - "Banner List": 56, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Poster Showcase": 59, - "Card List": 5050 - }, - "season":{ - "List": 50, - "Large List": 52, - "Low List": 54, - "Banner List": 56, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Poster Showcase": 59, - "Card List": 5050 - }, - "episode":{ - "List": 50, - "Large List": 52, - "Low List": 54, - "Banner List": 56, - "Panel": 51, - 
"Big Panel": 55, - "Icons": 53, - "Poster Showcase": 59, - "Card List": 5050 - }, - "addon":{ - "List": 50, - "Large List": 52, - "Low List": 54, - "Panel": 51, - "Big Panel": 55, - "Icons": 53, - "Fanart": 58, - "Card List": 5050 - } -} diff --git a/resources/views/skin.box.json b/resources/views/skin.box.json deleted file mode 100644 index e602ab5c..00000000 --- a/resources/views/skin.box.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "all":{ - "List": 50, - "Thumbs": 52, - "Fanart": 527, - "Poster": 532, - "Banner": 50, - "Banner List": 536, - "Fanart Logo": 539, - "List Info": 5231, - "Thumb Info": 533 - }, - "movie":{ - "List": 50, - "Thumbs": 52, - "Fanart": 527, - "Poster": 532, - "Fanart Logo": 539, - "List Info": 5231, - "Thumb Info": 533 - }, - "tvshow":{ - "List": 50, - "Thumbs": 52, - "Fanart": 527, - "Poster": 532, - "Banner": 50, - "Banner List": 536, - "Fanart Logo": 539, - "List Info": 5231, - "Thumb Info": 533 - }, - "season":{ - "List": 50, - "Poster": 532 - }, - "episode":{ - "List": 50, - "Thumbs": 52, - "List Info": 5231, - "Thumb Info": 533 - }, - "addon":{ - "List": 50 - } -} \ No newline at end of file diff --git a/resources/views/skin.confluence.json b/resources/views/skin.confluence.json deleted file mode 100644 index f0348dd8..00000000 --- a/resources/views/skin.confluence.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "all":{ - "List": 50, - "Big List": 55, - "Thumbnail": 500, - "Poster Wrap": 501, - "Fanart": 508, - "Media Info": 503, - "Media Info 2": 504, - "Media Info 3": 504, - "Wide": 505 - }, - "movie":{ - "List": 50, - "Big List": 55, - "Thumbnail": 500, - "Poster Wrap": 501, - "Fanart": 508, - "Media Info": 503, - "Media Info 2": 504, - "Media Info 3": 504 - }, - "tvshow":{ - "List": 50, - "Big List": 55, - "Thumbnail": 500, - "Poster Wrap": 501, - "Fanart": 508, - "Media Info": 503, - "Media Info 2": 504, - "Media Info 3": 504, - "Wide": 505 - }, - "season":{ - "List": 50, - "Big List": 55, - "Thumbnail": 500, - "Poster Wrap": 501, - 
"Media Info 2": 504, - "Media Info 3": 504 - }, - "episode":{ - "List": 50, - "Big List": 55, - "Thumbnail": 500, - "Media Info": 503, - "Media Info 2": 504, - "Media Info 3": 504 - }, - "addon":{ - "List": 50, - "Big List": 55, - "Thumbnail": 500 - } -} \ No newline at end of file diff --git a/resources/views/skin.eminence.2.json b/resources/views/skin.eminence.2.json deleted file mode 100644 index f2a5267e..00000000 --- a/resources/views/skin.eminence.2.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "all":{ - "Plain List": 552, - "Poster List": 550, - "Big List": 510, - "Media Info": 51, - "Media Info 2": 57, - "Media Info 3": 577, - "Info List": 53, - "Info List 2": 553, - "Fanart List": 54, - "Gallery": 58, - "Big Icons": 52, - "Mediun Icons": 525, - "Showcase": 535, - "Icons": 500, - "Info Icons": 59, - "Wide": 55, - "Fanart": 56 - }, - "movie":{ - "Plain List": 552, - "Poster List": 550, - "Big List": 510, - "Media Info": 51, - "Media Info 2": 57, - "Media Info 3": 577, - "Info List": 53, - "Info List 2": 553, - "Fanart List": 54, - "Gallery": 58, - "Big Icons": 52, - "Mediun Icons": 525, - "Showcase": 535, - "Icons": 500, - "Info Icons": 59, - "Wide": 55, - "Fanart": 56 - }, - "tvshow":{ - "Plain List": 552, - "Poster List": 550, - "Big List": 510, - "Media Info": 51, - "Media Info 2": 57, - "Media Info 3": 577, - "Info List": 53, - "Info List 2": 553, - "Fanart List": 54, - "Gallery": 58, - "Big Icons": 52, - "Mediun Icons": 525, - "Showcase": 535, - "Icons": 500, - "Info Icons": 59, - "Wide": 55, - "Fanart": 56 - }, - "season":{ - "Plain List": 552, - "Poster List": 550, - "Media Info": 51, - "Media Info 2": 57, - "Info List": 53, - "Fanart List": 54, - "Gallery": 58, - "Big Icons": 52, - "Mediun Icons": 525, - "Showcase": 535, - "Icons": 500, - "Info Icons": 59 - }, - "episode":{ - "Plain List": 552, - "Poster List": 550, - "Media Info": 51, - "Media Info 2": 57, - "Info List": 53, - "Info List 2": 553, - "Fanart List": 54, - "Gallery": 58, - "Icons": 500, - 
"Fanart": 56 - }, - "addon":{ - "Plain List": 552, - "Poster List": 550, - "Big List": 510, - "Icons": 500, - "Info Icons": 59 - } -} \ No newline at end of file diff --git a/resources/views/skin.estuary.json b/resources/views/skin.estuary.json deleted file mode 100644 index f329efb6..00000000 --- a/resources/views/skin.estuary.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "all":{ - "List": 50, - "Poster": 51, - "Shift": 53, - "InfoWall": 54, - "WideList": 55, - "Wall": 500, - "Banner": 501, - "Fanart": 502 - }, - "movie":{ - "List": 50, - "Poster": 51, - "Shift": 53, - "InfoWall": 54, - "WideList": 55, - "Wall": 500, - "Fanart": 502 - }, - "tvshow":{ - "List": 50, - "Poster": 51, - "Shift": 53, - "InfoWall": 54, - "WideList": 55, - "Wall": 500, - "Banner": 501, - "Fanart": 502 - }, - "season":{ - "List": 50, - "Poster": 51, - "Shift": 53, - "InfoWall": 54, - "WideList": 55, - "Wall": 500, - "Fanart": 502 - }, - "episode":{ - "List": 50, - "Poster": 51, - "Shift": 53, - "InfoWall": 54, - "WideList": 55, - "Wall": 500, - "Banner": 501, - "Fanart": 502 - }, - "addon":{ - "WideList": 55, - "Wall": 500 - } -} \ No newline at end of file diff --git a/resources/views/skin.phenomenal.json b/resources/views/skin.phenomenal.json deleted file mode 100644 index a59df640..00000000 --- a/resources/views/skin.phenomenal.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "all":{ - "Fanart": 50, - "Poster": 51, - "Thumb": 52, - "Showcase": 53, - "Wide List": 54, - "Icons": 55, - "Pictures": 56, - "Banner List": 57 - }, - "movie":{ - "Fanart": 50, - "Poster": 51, - "Thumb": 52, - "Showcase": 53, - "Wide List": 54, - "Icons": 55, - "Pictures": 56, - "Banner List": 57 - }, - "tvshow":{ - "Fanart": 50, - "Poster": 51, - "Thumb": 52, - "Showcase": 53, - "Wide List": 54, - "Icons": 55, - "Pictures": 56, - "Banner List": 57 - }, - "season":{ - "Fanart": 50, - "Poster": 51, - "Thumb": 52, - "Showcase": 53, - "Wide List": 54, - "Icons": 55, - "Pictures": 56, - "Banner List": 57 - }, - "episode":{ - 
"Fanart": 50, - "Poster": 51, - "Thumb": 52, - "Showcase": 53, - "Wide List": 54, - "Icons": 55, - "Pictures": 56, - "Banner List": 57 - }, - "addon":{ - "Fanart": 50, - "Poster": 51, - "Thumb": 52, - "Showcase": 53, - "Wide List": 54, - "Icons": 55, - "Pictures": 56, - "Banner List": 57 - } -} \ No newline at end of file diff --git a/resources/views/skin.quartz.json b/resources/views/skin.quartz.json deleted file mode 100644 index 8b352603..00000000 --- a/resources/views/skin.quartz.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "all":{ - "List": 50, - "Big List": 51, - "Media Info": 52, - "Media Info 2": 54, - "Icons": 53, - "Big Icons": 501, - "Panel": 501, - "Wide": 55, - "Fanart 1": 57, - "Fanart 2": 59, - "Fanart 3": 500 - }, - "movie":{ - "List": 50, - "Big List": 51, - "Media Info": 52, - "Media Info 2": 54, - "Icons": 53, - "Big Icons": 501, - "Panel": 501, - "Fanart 1": 57, - "Fanart 2": 59, - "Fanart 3": 500 - }, - "tvshow":{ - "List": 50, - "Big List": 51, - "Media Info": 52, - "Media Info 2": 54, - "Icons": 53, - "Big Icons": 501, - "Panel": 501, - "Wide": 55, - "Fanart 1": 57, - "Fanart 2": 59, - "Fanart 3": 500 - }, - "season":{ - "List": 50, - "Big List": 51, - "Media Info": 52, - "Media Info 2": 54, - "Icons": 53, - "Big Icons": 501, - "Panel": 501, - "Fanart 1": 57, - "Fanart 2": 59, - "Fanart 3": 500 - }, - "episode":{ - "List": 50, - "Big List": 51, - "Media Info": 52, - "Media Info 2": 54, - "Icons": 53, - "Panel": 501, - "Fanart 3": 500 - }, - "addon":{ - "List": 50, - "Big List": 51, - "Icons": 53 - } -} \ No newline at end of file diff --git a/resources/views/skin.rapier.json b/resources/views/skin.rapier.json deleted file mode 100644 index edcd77b6..00000000 --- a/resources/views/skin.rapier.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "all":{ - - }, - "movie":{ - "Icon": 50, - "List": 52, - "List Info": 58, - "List Info 2": 66, - "List Info 3": 95, - "Thumbs Info": 97, - "Wrap Info": 53, - "3D Wrap": 588, - "Fanart": 583, - "Wall": 69 - }, - 
"tvshow":{ - "Icon": 50, - "List": 52, - "List Info": 61, - "List Info 2": 65, - "List Info 3": 589, - "Thumbs Info": 100, - "Wrap Info": 577, - "3D Wrap": 588, - "Fanart": 583, - "Wall": 586 - }, - "season":{ - "Icon": 50, - "List": 52, - "List Info": 61, - "List Info 2": 65, - "List Info 3": 589, - "Wall": 586, - "Extended": 585 - }, - "episode":{ - "Icon": 50, - "List": 52, - "List Info": 73, - "List Info 2": 68, - "List Info 3": 94 - }, - "addon":{ - "Icon": 50, - "List": 52, - "List Info": 91, - "Wrap Info": 93 - } -} \ No newline at end of file diff --git a/resources/views/skin.revolve.json b/resources/views/skin.revolve.json deleted file mode 100644 index b6343245..00000000 --- a/resources/views/skin.revolve.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "all":{ - "Circle List": 52, - "Thumbnails List": 55, - "Drop List": 51, - "Landscape List": 53, - "Revolve List": 54, - "Panels List": 56, - "Gallery List": 57, - "Rotate List": 58, - "Banners List": 59 - }, - "movie":{ - "Circle List": 52, - "Thumbnails List": 55, - "Drop List": 51, - "Landscape List": 53, - "Revolve List": 54, - "Panels List": 56, - "Gallery List": 57, - "Rotate List": 58, - "Banners List": 59 - }, - "tvshow":{ - "Circle List": 52, - "Thumbnails List": 55, - "Drop List": 51, - "Landscape List": 53, - "Revolve List": 54, - "Panels List": 56, - "Gallery List": 57, - "Rotate List": 58, - "Banners List": 59 - }, - "season":{ - "Circle List": 52, - "Thumbnails List": 55, - "Drop List": 51, - "Landscape List": 53, - "Revolve List": 54, - "Panels List": 56, - "Gallery List": 57, - "Rotate List": 58, - "Banners List": 59 - }, - "episode":{ - "Circle List": 52, - "Thumbnails List": 55, - "Drop List": 51, - "Landscape List": 53, - "Revolve List": 54, - "Panels List": 56, - "Gallery List": 57, - "Rotate List": 58, - "Banners List": 59 - }, - "addon":{ - "Circle List": 52, - "Thumbnails List": 55, - "Drop List": 51, - "Landscape List": 53, - "Revolve List": 54, - "Panels List": 56, - "Gallery List": 57, - 
"Rotate List": 58, - "Banners List": 59 - } -} \ No newline at end of file diff --git a/resources/views/skin.unity.json b/resources/views/skin.unity.json deleted file mode 100644 index 9f17aa28..00000000 --- a/resources/views/skin.unity.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "all":{ - "List": 50, - "Big List": 51, - "Thumbnail": 500, - "Poster Wrap": 501, - "Fanart": 508, - "Media Info": 505, - "Media Info 2": 504, - "Media Info 3": 503, - "Media Info 4": 515, - "Wide": 505, - "Info": 550, - "Info 2": 551 - }, - "movie":{ - "List": 50, - "Big List": 51, - "Thumbnail": 500, - "Poster Wrap": 501, - "Fanart": 508, - "Media Info": 505, - "Media Info 2": 504, - "Media Info 3": 503 - }, - "tvshow":{ - "List": 50, - "Big List": 51, - "Thumbnail": 500, - "Poster Wrap": 501, - "Fanart": 508, - "Media Info": 505, - "Media Info 2": 504, - "Media Info 3": 503, - "Wide": 505 - }, - "season":{ - "List": 50, - "Big List": 51, - "Thumbnail": 500, - "Poster Wrap": 501, - "Media Info": 505, - "Media Info 2": 504, - "Media Info 3": 503 - }, - "episode":{ - "List": 50, - "Big List": 51, - "Thumbnail": 500, - "Media Info": 505, - "Media Info 2": 504, - "Media Info 3": 503 - }, - "addon":{ - "List": 50, - "Big List": 51, - "Thumbnail": 500, - "Info": 550, - "Info 2": 551 - } -} \ No newline at end of file diff --git a/servers/torrent.py b/servers/torrent.py index 7e6413f6..416419e0 100755 --- a/servers/torrent.py +++ b/servers/torrent.py @@ -1,76 +1,35 @@ # -*- coding: utf-8 -*- -import sys +import re, os, sys, time, requests, xbmc, xbmcaddon -# from builtins import str -from builtins import range +from core import filetools, jsontools +from core.support import dbg, log, match +from platformcode import config, platformtools +from torrentool.api import Torrent +from lib.guessit import guessit -PY3 = False -VFS = True -if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int; VFS = False - -if PY3: - #from future import standard_library - 
#standard_library.install_aliases() - import urllib.parse as urllib # Es muy lento en PY2. En PY3 es nativo +if sys.version_info[0] >= 3: + import urllib.parse as urllib else: import urllib -import time -import os -import traceback -import re +monitor = filetools.join(config.get_data_path(), 'elementum_monitor.json') +extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', '.mpe', '.mp4', '.ogg', '.wmv'] -try: - import xbmc - import xbmcgui - import xbmcaddon -except: - pass - -from core import filetools -from core import httptools -from core import scrapertools -from core import jsontools -from platformcode import logger -from platformcode import config -from platformcode import platformtools - -trackers = [ - "udp://tracker.openbittorrent.com:80/announce", - "http://tracker.torrentbay.to:6969/announce", - "http://tracker.pow7.com/announce", - "udp://tracker.ccc.de:80/announce", - "udp://open.demonii.com:1337", - - "http://9.rarbg.com:2710/announce", - "http://bt.careland.com.cn:6969/announce", - "http://explodie.org:6969/announce", - "http://mgtracker.org:2710/announce", - "http://tracker.best-torrents.net:6969/announce", - "http://tracker.tfile.me/announce", - "http://tracker1.wasabii.com.tw:6969/announce", - "udp://9.rarbg.com:2710/announce", - "udp://9.rarbg.me:2710/announce", - "udp://coppersurfer.tk:6969/announce", - - "http://www.spanishtracker.com:2710/announce", - "http://www.todotorrents.com:2710/announce", - ] # Returns an array of possible video url's from the page_url -def get_video_url(page_url, premium=False, user="", password="", video_password=""): +def get_video_url(page_url, premium=False, user='', password='', video_password=''): torrent_options = platformtools.torrent_client_installed(show_tuple=True) if len(torrent_options) == 0: from specials import elementum_download elementum_download.download() - logger.info("server=torrent, the url is the good") + log('server=torrent, the url is 
the good') - if page_url.startswith("magnet:"): - video_urls = [["magnet: [torrent]", page_url]] + if page_url.startswith('magnet:'): + video_urls = [['magnet: [torrent]', page_url]] else: - video_urls = [[".torrent [torrent]", page_url]] + video_urls = [['.torrent [torrent]', page_url]] return video_urls @@ -82,1275 +41,246 @@ class XBMCPlayer(xbmc.Player): xbmc_player = XBMCPlayer() - -def caching_torrents(url, referer=None, post=None, torrents_path=None, timeout=10, \ - lookup=False, data_torrent=False, headers={}, proxy_retries=1): - if torrents_path != None: - logger.info("path = " + torrents_path) - else: - logger.info() - if referer and post: - logger.info('REFERER: ' + referer) - - torrent_file = '' - t_hash = '' - if referer: - headers.update({'Content-Type': 'application/x-www-form-urlencoded', 'Referer': referer}) #Necesario para el Post del .Torrent - - """ - Descarga en el path recibido el .torrent de la url recibida, y pasa el decode - Devuelve el path real del .torrent, o el path vacío si la operación no ha tenido éxito - """ - - videolibrary_path = config.get_videolibrary_path() #Calculamos el path absoluto a partir de la Videoteca - if torrents_path == None: - if not videolibrary_path: - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - torrents_path = filetools.join(videolibrary_path, 'temp_torrents_Alfa', 'cliente_torrent_Alfa.torrent') #path de descarga temporal - if '.torrent' not in torrents_path: - torrents_path += '.torrent' #path para dejar el .torrent - #torrents_path_encode = filetools.encode(torrents_path) #encode utf-8 del path - torrents_path_encode = torrents_path - - #if url.endswith(".rar") or url.startswith("magnet:"): #No es un archivo .torrent - if url.endswith(".rar"): #No es un archivo .torrent - logger.error('No es un archivo Torrent: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return 
torrents_path #Si hay un error, devolvemos el "path" vacío - - try: - #Descargamos el .torrent - if url.startswith("magnet:"): - if config.get_setting("magnet2torrent", server="torrent", default=False): - torrent_file = magnet2torrent(url, headers=headers) #Convierte el Magnet en un archivo Torrent - else: - if data_torrent: - return (url, torrent_file) - return url - if not torrent_file: - logger.error('No es un archivo Magnet: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - else: - if lookup: - proxy_retries = 0 - if post: #Descarga con POST - response = httptools.downloadpage(url, headers=headers, post=post, \ - follow_redirects=False, timeout=timeout, proxy_retries=proxy_retries) - else: #Descarga sin post - response = httptools.downloadpage(url, headers=headers, timeout=timeout, \ - proxy_retries=proxy_retries) - if not response.sucess: - logger.error('Archivo .torrent no encontrado: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - torrent_file = response.data - torrent_file_uncoded = response.data - if PY3 and isinstance(torrent_file, bytes): - torrent_file = "".join(chr(x) for x in bytes(torrent_file_uncoded)) - - #Si es un archivo .ZIP tratamos de extraer el contenido - if torrent_file.startswith("PK"): - logger.info("it's a zip archive: " + url) - - torrents_path_zip = filetools.join(videolibrary_path, 'temp_torrents_zip') #Carpeta de trabajo - torrents_path_zip = filetools.encode(torrents_path_zip) - torrents_path_zip_file = filetools.join(torrents_path_zip, 'temp_torrents_zip.zip') #Nombre del .zip - - import time - filetools.rmdirtree(torrents_path_zip) #Borramos la carpeta temporal - time.sleep(1) #Hay que esperar, porque si no da error - filetools.mkdir(torrents_path_zip) #La creamos de nuevo - - if 
filetools.write(torrents_path_zip_file, torrent_file_uncoded, vfs=VFS): #Salvamos el .zip - torrent_file = '' #Borramos el contenido en memoria - try: #Extraemos el .zip - from core import ziptools - unzipper = ziptools.ziptools() - unzipper.extract(torrents_path_zip_file, torrents_path_zip) - except: - import xbmc - xbmc.executebuiltin('XBMC.Extract("%s", "%s")' % (torrents_path_zip_file, torrents_path_zip)) - time.sleep(1) - - for root, folders, files in filetools.walk(torrents_path_zip): #Recorremos la carpeta para leer el .torrent - for file in files: - if file.endswith(".torrent"): - input_file = filetools.join(root, file) #nombre del .torrent - torrent_file = filetools.read(input_file, vfs=VFS) #leemos el .torrent - torrent_file_uncoded = torrent_file - if PY3 and isinstance(torrent_file, bytes): - torrent_file = "".join(chr(x) for x in bytes(torrent_file_uncoded)) - - filetools.rmdirtree(torrents_path_zip) #Borramos la carpeta temporal - - #Si no es un archivo .torrent (RAR, HTML,..., vacío) damos error - if not scrapertools.find_single_match(torrent_file, '^d\d+:.*?\d+:'): - logger.error('No es un archivo Torrent: ' + url) - torrents_path = '' - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path #Si hay un error, devolvemos el "path" vacío - - #Calculamos el Hash del Torrent y modificamos el path - import bencode, hashlib - - decodedDict = bencode.bdecode(torrent_file_uncoded) - if not PY3: - t_hash = hashlib.sha1(bencode.bencode(decodedDict[b"info"])).hexdigest() - else: - t_hash = hashlib.sha1(bencode.bencode(decodedDict["info"])).hexdigest() - - if t_hash: - torrents_path = filetools.join(filetools.dirname(torrents_path), t_hash + '.torrent') - torrents_path_encode = filetools.join(filetools.dirname(torrents_path_encode), t_hash + '.torrent') - - #Salvamos el .torrent - if not lookup: - if not filetools.write(torrents_path_encode, torrent_file_uncoded, vfs=VFS): - logger.error('ERROR: Archivo .torrent no escrito: ' + 
torrents_path_encode) - torrents_path = '' #Si hay un error, devolvemos el "path" vacío - torrent_file = '' #... y el buffer del .torrent - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path - except: - torrents_path = '' #Si hay un error, devolvemos el "path" vacío - torrent_file = '' #... y el buffer del .torrent - logger.error('Error en el proceso de descarga del .torrent: ' + url + ' / ' + torrents_path_encode) - logger.error(traceback.format_exc()) - - #logger.debug(torrents_path) - if data_torrent: - return (torrents_path, torrent_file) - return torrents_path - - -def magnet2torrent(magnet, headers={}): - logger.info() - - torrent_file = '' - info = '' - post = '' - LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default="") - LIBTORRENT_MAGNET_PATH = filetools.join(config.get_setting("downloadpath"), 'magnet') - MAGNET2TORRENT = config.get_setting("magnet2torrent", server="torrent", default=False) - btih = scrapertools.find_single_match(magnet, 'urn:btih:([\w\d]+)\&').upper() - - if magnet.startswith('magnet') and MAGNET2TORRENT: - - # Tratamos de convertir el magnet on-line (opción más rápida, pero no se puede convertir más de un magnet a la vez) - url_list = [ - ('https://itorrents.org/torrent/', 6, '', '.torrent') - ] # Lista de servicios on-line testeados - for url, timeout, id, sufix in url_list: - if id: - post = '%s=%s' % (id, magnet) - else: - url = '%s%s%s' % (url, btih, sufix) - response = httptools.downloadpage(url, timeout=timeout, headers=headers, post=post) - if not response.sucess: - continue - if not scrapertools.find_single_match(response.data, '^d\d+:.*?\d+:') and not response.data.startswith("PK"): - continue - torrent_file = response.data - break - - #Usamos Libtorrent para la conversión del magnet como alternativa (es lento) - if not torrent_file: - lt, e, e1, e2 = import_libtorrent(LIBTORRENT_PATH) # Importamos Libtorrent - if lt: - ses = lt.session() # Si se ha importado bien, 
activamos Libtorrent - ses.add_dht_router("router.bittorrent.com",6881) - ses.add_dht_router("router.utorrent.com",6881) - ses.add_dht_router("dht.transmissionbt.com",6881) - if ses: - filetools.mkdir(LIBTORRENT_MAGNET_PATH) # Creamos la carpeta temporal - params = { - 'save_path': LIBTORRENT_MAGNET_PATH, - 'trackers': trackers, - 'storage_mode': lt.storage_mode_t.storage_mode_allocate - } # Creamos los parámetros de la sesión - - h = lt.add_magnet_uri(ses, magnet, params) # Abrimos la sesión - i = 0 - while not h.has_metadata() and not xbmc.abortRequested: # Esperamos mientras Libtorrent abre la sesión - h.force_dht_announce() - time.sleep(1) - i += 1 - logger.error(i) - if i > 5: - LIBTORRENT_PATH = '' # No puede convertir el magnet - break - - if LIBTORRENT_PATH: - info = h.get_torrent_info() # Obtiene la información del .torrent - torrent_file = lt.bencode(lt.create_torrent(info).generate()) # Obtiene los datos del .torrent - ses.remove_torrent(h) # Desactiva Libtorrent - filetools.rmdirtree(LIBTORRENT_MAGNET_PATH) # Elimina la carpeta temporal - - return torrent_file - - -def verify_url_torrent(url, timeout=5): - """ - Verifica si el archivo .torrent al que apunta la url está disponible, descargándolo en un area temporal - Entrada: url - Salida: True o False dependiendo del resultado de la operación - """ - - if not url or url == 'javascript:;': #Si la url viene vacía... - return False #... volvemos con error - torrents_path = caching_torrents(url, timeout=timeout, lookup=True) #Descargamos el .torrent - if torrents_path: #Si ha tenido éxito... 
- return True - else: - return False - - -# Reproductor Cliente Torrent propio (libtorrent) -def bt_client(mediaurl, xlistitem, rar_files, subtitle=None, password=None, item=None): - logger.info() - - # Importamos el cliente - from btserver import Client - - played = False - debug = False - - try: - save_path_videos = '' - save_path_videos = filetools.join(config.get_setting("bt_download_path", server="torrent", \ - default=config.get_setting("downloadpath")), 'BT-torrents') - except: - pass - if not config.get_setting("bt_download_path", server="torrent") and save_path_videos: - config.set_setting("bt_download_path", filetools.join(config.get_data_path(), 'downloads'), server="torrent") - if not save_path_videos: - save_path_videos = filetools.join(config.get_data_path(), 'downloads', 'BT-torrents') - config.set_setting("bt_download_path", filetools.join(config.get_data_path(), 'downloads'), server="torrent") - - UNRAR = config.get_setting("unrar_path", server="torrent", default="") - BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) - RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) - try: - BUFFER = int(config.get_setting("bt_buffer", server="torrent", default="50")) - except: - BUFFER = 50 - DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="") - if DOWNLOAD_LIMIT: - try: - DOWNLOAD_LIMIT = int(DOWNLOAD_LIMIT) - except: - DOWNLOAD_LIMIT = 0 - else: - DOWNLOAD_LIMIT = 0 - UPLOAD_LIMIT = 100 - - torr_client = 'BT' - rar_file = '' - rar_names = [] - rar = False - rar_res = False - bkg_user = False - video_names = [] - video_file = '' - video_path = '' - videourl = '' - msg_header = 'KoD %s Client Torrent' % torr_client - extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', - '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', - '.mpe', '.mp4', '.ogg', '.rar', '.wmv', '.zip'] - - for entry in rar_files: - for file, path in list(entry.items()): - if file 
== 'path' and '.rar' in str(path): - for file_r in path: - rar_names += [file_r] - rar = True - if RAR and BACKGROUND: - bkg_user = True - elif file == 'path' and not '.rar' in str(path): - for file_r in path: - if os.path.splitext(file_r)[1] in extensions_list: - video_names += [file_r] - elif file == '__name': - video_path = path - video_file = path - if rar: rar_file = '%s/%s' % (video_path, rar_names[0]) - erase_file_path = filetools.join(save_path_videos, video_path) - video_path = erase_file_path - if video_names: video_file = video_names[0] - if not video_file and mediaurl.startswith('magnet'): - video_file = urllib.unquote_plus(scrapertools.find_single_match(mediaurl, '(?:\&|&)dn=([^\&]+)\&')) - erase_file_path = filetools.join(save_path_videos, video_file) - - if rar and RAR and not UNRAR: - if not platformtools.dialog_yesno(msg_header, config.get_localized_string(70791)): - return - - # Iniciamos el cliente: - c = Client(url=mediaurl, is_playing_fnc=xbmc_player.isPlaying, wait_time=None, auto_shutdown=False, timeout=10, - temp_path=save_path_videos, print_status=debug, auto_delete=False) - - activo = True - finalizado = False - dp_cerrado = True - - # Mostramos el progreso - if rar and RAR and BACKGROUND: # Si se descarga un RAR... 
- progreso = platformtools.dialog_progress_bg(msg_header) - platformtools.dialog_notification(config.get_localized_string(70790), config.get_localized_string(70769), time=10000) - else: - progreso = platformtools.dialog_progress('%s Torrent Client' % torr_client, '') - dp_cerrado = False - - # Mientras el progreso no sea cancelado ni el cliente cerrado - try: - while not c.closed and not xbmc.abortRequested: - # Obtenemos el estado del torrent - s = c.status - if debug: - # Montamos las tres lineas con la info del torrent - txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ - (s.progress_file, s.file_size, s.str_state, s._download_rate) - txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d | Pi: %d(%d)' % \ - (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, - s.trackers, s.pieces_sum, s.pieces_len) - txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \ - (s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers) - else: - txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ - (s.progress_file, s.file_size, s.str_state, s._download_rate) - txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d | Pi: %d(%d)' % \ - (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, - s.trackers, s.pieces_sum, s.pieces_len) - txt3 = video_file - - if rar and RAR and BACKGROUND or bkg_user: - progreso.update(s.buffer, txt, txt2) - else: - progreso.update(s.buffer, txt, txt2, txt3) - time.sleep(1) - - if (not bkg_user and progreso.iscanceled()) and (not (rar and RAR and BACKGROUND) and progreso.iscanceled()): - - if not dp_cerrado: - progreso.close() - dp_cerrado = True - if 'Finalizado' in s.str_state or 'Seeding' in s.str_state: - """ - if not rar and platformtools.dialog_yesno(msg_header, config.get_localized_string(70198)): - played = False - dp_cerrado = False - progreso = platformtools.dialog_progress(msg_header, '') - progreso.update(s.buffer, txt, txt2, txt3) - else: - """ - dp_cerrado = False - progreso = 
platformtools.dialog_progress(msg_header, '') - break - - else: - if not platformtools.dialog_yesno(msg_header, config.get_localized_string(30031), config.get_localized_string(30032)): - dp_cerrado = False - progreso = platformtools.dialog_progress(msg_header, '') - break - - else: - bkg_user = True - if not dp_cerrado: progreso.close() - dp_cerrado = False - progreso = platformtools.dialog_progress_bg(msg_header) - progreso.update(s.buffer, txt, txt2) - if not c.closed: - c.set_speed_limits(DOWNLOAD_LIMIT, UPLOAD_LIMIT) # Bajamos la velocidad en background - - # Si el buffer se ha llenado y la reproduccion no ha sido iniciada, se inicia - if ((s.pieces_sum >= BUFFER or 'Finalizado' in s.str_state or 'Seeding' in s.str_state) and not rar and not bkg_user) or \ - (s.pieces_sum >= s.pieces_len - 3 and s.pieces_len > 0 and ('Finalizado' in s.str_state or 'Seeding' \ - in s.str_state) and (rar or bkg_user)) and not played: - - if rar and RAR and UNRAR: - c.stop() - activo = False - finalizado = True - bkg_user = False - dp_cerrado = False - video_file, rar_res, video_path, erase_file_path = extract_files(rar_file, \ - save_path_videos, password, progreso, item, torr_client) # ... extraemos el vídeo del RAR - if rar_res and not xbmc.abortRequested: - time.sleep(1) - else: - break - elif (rar and not UNRAR) or (rar and not RAR): - break - elif bkg_user: - finalizado = True - break - - # Cerramos el progreso - if not dp_cerrado: - progreso.close() - dp_cerrado = True - - # Reproducimos el vídeo extraido, si no hay nada en reproducción - if not c.closed: - c.set_speed_limits(DOWNLOAD_LIMIT, UPLOAD_LIMIT) # Bajamos la velocidad en background - bkg_auto = True - while xbmc_player.isPlaying() and not xbmc.abortRequested: - time.sleep(3) - - # Obtenemos el playlist del torrent - #videourl = c.get_play_list() - if not rar_res: # Es un Magnet ? 
- video_file = filetools.join(save_path_videos, s.file_name) - if erase_file_path == save_path_videos: - erase_file_path = video_file - videourl = video_file - else: - videourl = filetools.join(video_path, video_file) - - # Iniciamos el reproductor - playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) - playlist.clear() - playlist.add(videourl, xlistitem) - # xbmc_player = xbmc_player - log("##### videourl: %s" % videourl) - xbmc_player.play(playlist) - - # Marcamos como reproducido para que no se vuelva a iniciar - played = True - - mark_auto_as_watched(item) - - # Y esperamos a que el reproductor se cierre - bkg_auto = True - dp_cerrado = True - while xbmc_player.isPlaying() and not xbmc.abortRequested: - time.sleep(1) - - if xbmc.getCondVisibility('Player.Playing'): - if not dp_cerrado: - dp_cerrado = True - progreso.close() - - if xbmc.getCondVisibility('Player.Paused') and not rar_res: - if not c.closed: s = c.status - txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ - (s.progress_file, s.file_size, s.str_state, s._download_rate) - txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d | Pi: %d(%d)' % \ - (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, - s.trackers, s.pieces_sum, s.pieces_len) - txt3 = video_file[:99] - if dp_cerrado: - dp_cerrado = False - progreso = xbmcgui.DialogProgressBG() - progreso.create(msg_header) - progreso.update(s.buffer, msg_header, '[CR][CR]' + txt + '[CR]' + txt2) - - if not dp_cerrado: - dp_cerrado = True - progreso.close() - - # Miramos si se ha completado la descarga para borrar o no los archivos - if activo: - s = c.status - if s.pieces_sum == s.pieces_len: - finalizado = True - break - - if not platformtools.dialog_yesno(msg_header, config.get_localized_string(30031), config.get_localized_string(30032)): - progreso = platformtools.dialog_progress(msg_header, '') - dp_cerrado = False - break - else: - bkg_user = True - played = False - if not dp_cerrado: progreso.close() - progreso = 
platformtools.dialog_progress_bg(msg_header) - progreso.update(s.buffer, txt, txt2) - dp_cerrado = False - continue - - # Cuando este cerrado, Volvemos a mostrar el dialogo - if not (rar and bkg_user): - progreso = platformtools.dialog_progress(msg_header, '') - progreso.update(s.buffer, txt, txt2, txt3) - dp_cerrado = False - - break - except: - logger.error(traceback.format_exc(1)) - return - - if not dp_cerrado: - if rar or bkg_user: - progreso.update(100, config.get_localized_string(70200), " ") - else: - progreso.update(100, config.get_localized_string(70200), " ", " ") - - # Detenemos el cliente - if activo and not c.closed: - c.stop() - activo = False - - # Cerramos el progreso - if not dp_cerrado: - progreso.close() - dp_cerrado = True - - # Y borramos los archivos de descarga restantes - time.sleep(1) - if filetools.exists(erase_file_path) and not bkg_user: - if finalizado and not platformtools.dialog_yesno(msg_header, config.get_localized_string(70792)): - return - log("##### erase_file_path: %s" % erase_file_path) - for x in range(10): - if filetools.isdir(erase_file_path): - if erase_file_path != save_path_videos: - filetools.rmdirtree(erase_file_path) - else: - break - else: - filetools.remove(erase_file_path) - time.sleep(5) - if not filetools.exists(erase_file_path): - break - - -def call_torrent_via_web(mediaurl, torr_client): - # Usado para llamar a los clientes externos de Torrents para automatizar la descarga de archivos que contienen .RAR - logger.info() - - post = '' - ELEMENTUMD_HOST = "http://localhost:65220" - if torr_client == 'elementum': - try: - ADDON = xbmcaddon.Addon("plugin.video.elementum") - except: - ADDON = False - if ADDON: - ELEMENTUMD_HOST = "http://" + ADDON.getSetting("remote_host") + ":" + ADDON.getSetting("remote_port") - - local_host = {"quasar": ["http://localhost:65251/torrents/", "add?uri"], \ - "elementum": ["%s/torrents/" % ELEMENTUMD_HOST, "add"]} - - if torr_client == "quasar": - uri = '%s%s=%s' % 
(local_host[torr_client][0], local_host[torr_client][1], mediaurl) - elif torr_client == "elementum": - uri = '%s%s' % (local_host[torr_client][0], local_host[torr_client][1]) - post = 'uri=%s&file=null&all=1' % mediaurl - - if post: - response = httptools.downloadpage(uri, post=post, timeout=5, alfa_s=True, ignore_response_code=True) - else: - response = httptools.downloadpage(uri, timeout=5, alfa_s=True, ignore_response_code=True) - - return response.sucess - - def mark_auto_as_watched(item): - time_limit = time.time() + 150 #Marcamos el timepo máx. de buffering - while not platformtools.is_playing() and time.time() < time_limit: #Esperamos mientra buffera - time.sleep(5) #Repetimos cada intervalo - #logger.debug(str(time_limit)) + time_limit = time.time() + 150 + while not platformtools.is_playing() and time.time() < time_limit: + time.sleep(5) if item.subtitle: time.sleep(5) xbmc_player.setSubtitles(item.subtitle) - #subt = xbmcgui.ListItem(path=item.url, thumbnailImage=item.thumbnail) - #subt.setSubtitles([item.subtitle]) - if item.strm_path and platformtools.is_playing(): #Sólo si es de Videoteca + if item.strm_path and platformtools.is_playing(): from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_auto_as_watched(item) #Marcamos como visto al terminar - #logger.debug("Llamado el marcado") + xbmc_videolibrary.mark_auto_as_watched(item) -def wait_for_download(item, mediaurl, rar_files, torr_client, password='', size='', rar_control={}): - logger.info() - - from subprocess import Popen, PIPE, STDOUT - - # Analizamos los archivos dentro del .torrent - rar = False - rar_names = [] - rar_names_abs = [] - folder = '' - if rar_control: - for x, entry in enumerate(rar_control['rar_files']): - if '__name' in entry: - folder = rar_control['rar_files'][x]['__name'] - break - rar_names = [rar_control['rar_names'][0]] +def setting(): + if xbmc.getCondVisibility('System.HasAddon("plugin.video.elementum")') == 1: + elementum_setting = 
xbmcaddon.Addon(id='plugin.video.elementum') + elementum_host = 'http://127.0.0.1:' + elementum_setting.getSetting('remote_port') + '/torrents/' + TorrentPath = xbmc.translatePath(elementum_setting.getSetting('torrents_path')) else: - for entry in rar_files: - for file, path in list(entry.items()): - if file == 'path' and '.rar' in str(path): - for file_r in path: - rar_names += [file_r] - rar = True - elif file == '__name': - folder = path + elementum_setting = '' + elementum_host = '' + TorrentPath = '' + return elementum_setting, elementum_host, TorrentPath - if not folder: # Si no se detecta el folder... - return ('', '', '') # ... no podemos hacer nada - if not rar_names: - return ('', '', folder) - rar_file = '%s/%s' % (folder, rar_names[0]) - log("##### rar_file: %s" % rar_file) - if len(rar_names) > 1: - log("##### rar_names: %s" % str(rar_names)) +def elementum_download(item): + elementum_setting, elementum_host, TorrentPath = setting() - # Localizamos el path de descarga del .torrent - save_path_videos = '' - __settings__ = xbmcaddon.Addon(id="plugin.video.%s" % torr_client) # Apunta settings del cliente torrent - if torr_client == 'torrenter': - save_path_videos = str(xbmc.translatePath(__settings__.getSetting('storage'))) - if not save_path_videos: - save_path_videos = str(filetools.join(xbmc.translatePath("special://home/"), \ - "cache", "xbmcup", "plugin.video.torrenter", "Torrenter")) - else: - save_path_videos = str(xbmc.translatePath(__settings__.getSetting('download_path'))) - if __settings__.getSetting('download_storage') == '1': # Descarga en memoria? - return ('', '', folder) # volvemos - if not save_path_videos: # No hay path de descarga? 
- return ('', '', folder) # Volvemos - log("##### save_path_videos: %s" % save_path_videos) + if elementum_setting: + set_elementum(True) + time.sleep(3) + TorrentName = match(item.url, patron=r'btih(?::|%3A)([^&%]+)', string=True).match + post = 'uri=%s&file=null&all=1' % urllib.quote_plus(item.url) + match(elementum_host + 'add', post=post, timeout=5, alfa_s=True, ignore_response_code=True) + while not filetools.isfile(filetools.join(elementum_setting.getSetting('torrents_path'), TorrentName + '.torrent')): + time.sleep(1) - # Si es nueva descarga, ponemos un archivo de control para reiniciar el UNRar si ha habido cancelación de Kodi - # Si ya existe el archivo (llamada), se reinicia el proceso de UNRar donde se quedó - if rar_control: - if 'downloading' not in rar_control['status']: - log("##### Torrent DESCARGADO Anteriormente: %s" % str(folder)) - return (rar_file, save_path_videos, folder) - else: - rar_control = { - 'torr_client': torr_client, - 'rar_files': rar_files, - 'rar_names': rar_names, - 'size': size, - 'password': password, - 'download_path': filetools.join(save_path_videos, folder), - 'status': 'downloading', - 'error': 0, - 'error_msg': '', - 'item': item.tourl(), - 'mediaurl': mediaurl - } + monitor_update(TorrentPath, TorrentName) - if torr_client == 'quasar': # Quasar no copia en .torrent - ret = filetools.copy(item.url, filetools.join(save_path_videos, 'torrents', \ - filetools.basename(item.url)), silent=True) - # Esperamos mientras el .torrent se descarga. 
Verificamos si el .RAR está descargado al completo - platformtools.dialog_notification(config.get_localized_string(70803), "", time=10000) +def elementum_monitor(): + path = xbmc.translatePath(config.get_setting('downloadlistpath')) + elementum_setting, elementum_host, TorrentPath = setting() + active_torrent = filetools.listdir(TorrentPath) - # Plan A: usar el monitor del cliente torrent para ver el status de la descarga - loop = 3600 # Loop de 10 horas hasta crear archivo - wait_time = 10 - time.sleep(wait_time) - fast = False - ret = filetools.write(filetools.join(rar_control['download_path'], '_rar_control.json'), jsontools.dump(rar_control)) - - for x in range(loop): - if xbmc.abortRequested: - return ('', '', folder) - torr_data, deamon_url, index = get_tclient_data(folder, torr_client) - if not torr_data or not deamon_url: - if len(filetools.listdir(rar_control['download_path'], silent=True)) <= 1: - filetools.remove(filetools.join(rar_control['download_path'], '_rar_control.json'), silent=True) - filetools.rmdir(rar_control['download_path'], silent=True) - return ('', '', folder) # Volvemos - if (torr_client in ['quasar'] or torr_client in ['elementum']) and not \ - torr_data['label'].startswith('0.00%') and not fast: - platformtools.dialog_notification(config.get_localized_string(60200), config.get_localized_string(70769), time=10000) - fast = True - if not torr_data['label'].startswith('100.00%'): - log("##### Downloading: %s, ID: %s" % (scrapertools.find_single_match(torr_data['label'], '(^.*?\%)'), index)) - time.sleep(wait_time) - continue - - update_rar_control(rar_control['download_path'], status='downloaded') - log("##### Torrent FINALIZED: %s" % str(folder)) - return (rar_file, save_path_videos, folder) - - # Plan B: monitorizar con UnRAR si los archivos se han desacargado por completo - unrar_path = config.get_setting("unrar_path", server="torrent", default="") - if not unrar_path: # Si Unrar no está instalado... - return ('', '', folder) # ... 
no podemos hacer nada - - cmd = [] - for rar_name in rar_names: # Preparamos por si es un archivo multiparte - cmd.append(['%s' % unrar_path, 'l', '%s' % filetools.join(save_path_videos, folder, rar_name)]) - - creationflags = '' - if xbmc.getCondVisibility("system.platform.Windows"): - creationflags = 0x08000000 - loop = 30 # Loop inicial de 5 minutos hasta crear archivo - wait_time = 10 - loop_change = 0 - loop_error = 6 - part_name = '' - y = 0 - returncode = '' - fast = False - while rar and not xbmc.abortRequested: - for x in range(loop): # Loop corto (5 min.) o largo (10 h.) - if xbmc.abortRequested: - return ('', '', folder) - if not rar or loop_change > 0: - loop = loop_change # Paso de loop corto a largo - loop_change = 0 - break - try: - responses = [] - for z, command in enumerate(cmd): # Se prueba por cada parte - if xbmc.getCondVisibility("system.platform.Windows"): - data_rar = Popen(command, bufsize=0, stdout=PIPE, stdin=PIPE, \ - stderr=STDOUT, creationflags=creationflags) - else: - data_rar = Popen(command, bufsize=0, stdout=PIPE, stdin=PIPE, \ - stderr=STDOUT) - out_, error_ = data_rar.communicate() - responses.append([z, str(data_rar.returncode), out_, error_]) # Se guarda la respuesta de cada parte - except: - logger.error(traceback.format_exc(1)) # Error de incompatibilidad de UnRAR - rar = False - break - else: - dl_files = 0 - for z, returncode, out__, error__ in responses: # Analizamos las respuestas - if returncode == '0': # Ya se ha descargado... parte ... - dl_files += 1 - part_name = scrapertools.find_single_match(str(out__), '(\.part\d+.rar)') - log("##### Torrent downloading: %s, %s" % (part_name, str(returncode))) - if dl_files == len(cmd): # ... o todo - fast = True - rar = False - break # ... o sólo una parte - elif returncode == '10': # archivo no existe - if loop != 30: # Si el archivo es borrado durante el proceso ... - rar = False - break #... 
abortamos - elif returncode == '6': # En proceso de descarga - y += 1 - #if loop == 30 and y == len(responses): # Si es la primera vez en proceso ... - if loop == 30 and y == 1: # Si es la primera vez en proceso ... - if torr_client in ['quasar']: - platformtools.dialog_notification(config.get_localized_string(60200), config.get_localized_string(70769), time=10000) - loop_change = 3600 # ... pasamos a un loop de 10 horas - elif loop <= 6: # Recuerado el error desconocido - loop_change = 3600 # ... pasamos a un loop de 10 horas - loop_error = 6 # Restauramos loop_error por si acaso - break - elif returncode == '1': # Ha alcanzado el fin de archivo ??? pasamos - part_name = scrapertools.find_single_match(str(out__), '(\.part\d+.rar)') - log("##### Torrent downloading: %s, %s" % (part_name, str(returncode))) - else: # No entendemos el error - loop_change = loop_error # ... pasamos a un loop de 1 minutos para reintentar - loop_error += -1 - break #... abortamos - - if str(returncode) in ['0', '6', '10']: - log("##### Torrent downloading: %s" % str(returncode)) - else: - log("##### Torrent downloading: %s, %s" % (str(out__), str(returncode))) - if not rar or fast: - fast = False - break - time.sleep(wait_time) # Esperamos un poco y volvemos a empezar + if elementum_setting: + # check if command file exist + if filetools.isfile(monitor): + json = jsontools.load(open(monitor, "r").read()) + Monitor = json['monitor'] + # else create it else: - rar = False + Monitor = jsontools.load('{"monitor":{},"settings":{}}') + json = jsontools.dump(Monitor) + filetools.write(monitor, json, silent=True) + + if len(Monitor) > 0: + try: + data = requests.get(elementum_host + '/list').json() + except: + data = '' + if data: + for it in data: + progress = round(it['progress'], 2) + status = it['status'] + name = it['id'] + if name in Monitor: + jsontools.update_node(progress, Monitor[name]['file'], 'downloadProgress', path, silent=True) + jsontools.update_node(4, Monitor[name]['file'], 
'downloadStatus', path, silent=True) + if status in ['Paused']: + jsontools.update_node(0, Monitor[name]['file'], 'downloadStatus', path, silent=True) + if status in ['Seeding', 'Finished'] and not config.get_setting('elementum_on_seed'): + monitor_update(TorrentPath, name, remove=True) + dlJson = jsontools.load(open(filetools.join(path, Monitor[name]['file']), "r").read()) + jsontools.update_node(dlJson['downloadSize'], Monitor[name]['file'], 'downloadCompleted', path, silent=True) + jsontools.update_node(2, Monitor[name]['file'], 'downloadStatus', path, silent=True) + requests.get(elementum_host + 'pause/' + name) + filetools.remove(filetools.join(TorrentPath, name + '.torrent')) + filetools.remove(filetools.join(TorrentPath, name + '.fastresume')) + # time.sleep(1) + # rename(Monitor[name]['file']) + + +def monitor_update(TorrentPath, value, remove=False): + elementum_setting, elementum_host, TorrentPath = setting() + json = jsontools.load(open(monitor, "r").read()) + Monitor = json['monitor'] + info = Torrent.from_file(filetools.join(TorrentPath, value + '.torrent')) + path = xbmc.translatePath(config.get_setting('downloadlistpath')) + + if not value in Monitor: + Monitor[value]={} + Monitor[value]['name'] = info.name + Monitor[value]['size'] = info.total_size + File = find_file(value) + Monitor[value]['file'] = File + json = jsontools.dump(json) + filetools.write(monitor, json, silent=True) + + backupFilename = jsontools.load(open(filetools.join(path, File), "r").read())['downloadFilename'] + jsontools.update_node(value, File, 'TorrentName', path, silent=True) + jsontools.update_node(info.total_size, File, 'downloadSize', path, silent=True) + jsontools.update_node(backupFilename, File, 'backupFilename', path, silent=True) + jsontools.update_node(info.name, File, 'downloadFilename', path, silent=True) + + elif remove: + Monitor.pop(value) + jsontools.dump(json) + filetools.write(monitor, jsontools.dump(json), silent=True) + + if len(Monitor) == 0: 
set_elementum() + + +def set_elementum(SET=False): + elementum_setting, elementum_host, TorrentPath = setting() + json = jsontools.load(open(monitor, "r").read()) + backup_setting = json['settings'] + write = False + if SET: + if elementum_setting.getSetting('logger_silent') == False or not 'logger_silent' in backup_setting: + elementum_setting.setSetting('logger_silent', 'true') + backup_setting['logger_silent'] = 'false' + + if elementum_setting.getSetting('download_storage') != 0 or not 'download_storage' in backup_setting: + backup_setting['download_storage'] = elementum_setting.getSetting('download_storage') # Backup Setting + elementum_setting.setSetting('download_storage', '0') # Set Setting + + if elementum_setting.getSetting('download_path') != config.get_setting('downloadpath') or not 'download_path' in backup_setting: + backup_setting['download_path'] = elementum_setting.getSetting('download_path') # Backup Setting + elementum_setting.setSetting('download_path', config.get_setting('downloadpath')) # Set Setting + write = True + + elif backup_setting: + elementum_setting.setSetting('logger_silent', backup_setting['logger_silent']) + elementum_setting.setSetting('download_storage', backup_setting['download_storage']) + elementum_setting.setSetting('download_path', backup_setting['download_path']) + json['settings'] = {} + write = True + if write: + json = jsontools.dump(json) + filetools.write(monitor, json, silent=True) + time.sleep(1) + + +def find_file(hash): + path = xbmc.translatePath(config.get_setting('downloadlistpath')) + files = filetools.listdir(path) + for f in files: + filepath = filetools.join(path, f) + json = jsontools.load(filetools.read(filepath)) + if ('downloadServer' in json and 'url' in json['downloadServer'] and hash in json['downloadServer']['url']) or ('url' in json and hash in json['url']): break + return filetools.split(filepath)[-1] - if str(returncode) == '0': - log("##### Torrent FINALIZED: %s" % str(returncode)) + +def 
elementum_actions(parameter, TorrentHash): + elementum_setting, elementum_host, TorrentPath = setting() + if elementum_setting: + if parameter == 'delete': monitor_update(TorrentPath, TorrentHash, remove=True) + requests.get('%s/%s/%s' %(elementum_host, parameter, TorrentHash)) + + +def process_filename(filename, Title, ext=True): + extension = os.path.splitext(filename)[-1] + parsedTitle = guessit(filename) + t = parsedTitle.get('title', '') + episode = '' + s = ' - ' + if parsedTitle.get('episode') and parsedTitle.get('season'): + if type(parsedTitle.get('season')) == list: + episode += str(parsedTitle.get('season')[0]) + '-' + str(parsedTitle.get('season')[-1]) + else: + episode += str(parsedTitle.get('season')) + + if type(parsedTitle.get('episode')) == list: + episode += 'x' + str(parsedTitle.get('episode')[0]).zfill(2) + '-' + str(parsedTitle.get('episode')[-1]).zfill(2) + else: + episode += 'x' + str(parsedTitle.get('episode')).zfill(2) + elif parsedTitle.get('season') and type(parsedTitle.get('season')) == list: + episode += s + config.get_localized_string(30140) + " " +str(parsedTitle.get('season')[0]) + '-' + str(parsedTitle.get('season')[-1]) + elif parsedTitle.get('season'): + episode += s + config.get_localized_string(60027) % str(parsedTitle.get('season')) + if parsedTitle.get('episode_title'): + episode += s + parsedTitle.get('episode_title') + title = (t if t else Title) + s + episode + (extension if ext else '') + return title + + +def rename(File): + jsonPath = xbmc.translatePath(config.get_setting('downloadlistpath')) + json = jsontools.load(open(filetools.join(jsonPath, File), "r").read()) + filePath = filetools.join(xbmc.translatePath(config.get_setting('downloadpath')), json['downloadFilename']) + + if json['infoLabels']['mediatype'] == 'movie': + if filetools.isdir(filePath): + extension = '' + files = filetools.listdir(filePath) + oldName = json['downloadFilename'] + newName = json['backupFilename'] + for f in files: + ext = 
os.path.splitext(f)[-1] + if ext in extensions_list: extension = ext + filetools.rename(filetools.join(filePath, f), f.replace(oldName, newName)) + filetools.rename(filePath, newName) + jsontools.update_node(filetools.join(newName, newName + extension), File, 'downloadFilename', jsonPath) + + else: + oldName = json['downloadFilename'] + newName = json['backupFilename'] + os.path.splitext(oldName)[-1] + filetools.rename(filePath, newName) + jsontools.update_node(newName, File, 'downloadFilename', jsonPath) else: - rar_file = '' - logger.error('##### Torrent NO DESCARGADO: %s, %s' % (str(out__), str(returncode))) + sep = '/' if filePath.lower().startswith("smb://") else os.sep + FolderName = json['backupFilename'].split(sep)[0] + Title = re.sub(r'(\s*\[[^\]]+\])', '', FolderName) + if filetools.isdir(filePath): + files = filetools.listdir(filePath) + file_dict = {} + for f in files: + title = process_filename(f, Title, ext=False) + ext = os.path.splitext(f)[-1] + name = os.path.splitext(f)[0] + if title not in file_dict and ext in extensions_list: + file_dict[title] = name - return (rar_file, save_path_videos, folder) + for title, name in file_dict.items(): + for f in files: + if name in f: + filetools.rename(filetools.join(filePath, f), f.replace(name, title)) - -def get_tclient_data(folder, torr_client): - - # Monitoriza el estado de descarga del torrent en Quasar y Elementum - ELEMENTUMD_HOST = "http://localhost:65220" - if torr_client == 'elementum': - try: - ADDON = xbmcaddon.Addon("plugin.video.elementum") - except: - ADDON = False - if ADDON: - ELEMENTUMD_HOST = "http://" + ADDON.getSetting("remote_host") + ":" + ADDON.getSetting("remote_port") - - local_host = {"quasar": "http://localhost:65251/torrents/", "elementum": "%s/torrents/" % ELEMENTUMD_HOST} - torr = '' - torr_id = '' - x = 0 - y = '' - - try: - data = httptools.downloadpage(local_host[torr_client], timeout=5, alfa_s=True).data - if not data: - return '', local_host[torr_client], 0 - - data = 
jsontools.load(data) - data = data['items'] - for x, torr in enumerate(data): - if not folder in torr['label']: - continue - if "elementum" in torr_client: - torr_id = scrapertools.find_single_match(str(torr), 'torrents\/move\/(.*?)\)') - break + filetools.rename(filePath, FolderName) + jsontools.update_node(FolderName, File, 'downloadFilename', jsonPath) else: - return '', local_host[torr_client], 0 - except: - log(traceback.format_exc(1)) - return '', local_host[torr_client], 0 - - if torr_id: - y = torr_id - else: - y = x - return torr, local_host[torr_client], y - - -def extract_files(rar_file, save_path_videos, password, dp, item=None, \ - torr_client=None, rar_control={}, size='RAR', mediaurl=''): - logger.info() - - from platformcode import custom_code - - if not rar_control: - rar_control = { - 'torr_client': torr_client, - 'rar_files': [{"__name": "%s" % rar_file.split("/")[0]}], - 'rar_names': [filetools.basename(rar_file)], - 'size': size, - 'password': password, - 'download_path': save_path_videos, - 'status': 'downloaded', - 'error': 0, - 'error_msg': '', - 'item': item.tourl(), - 'mediaurl': mediaurl - } - ret = filetools.write(filetools.join(rar_control['download_path'], '_rar_control.json'), jsontools.dump(rar_control)) - - #reload(sys) - #sys.setdefaultencoding('utf-8') - sys.path.insert(0, config.get_setting("unrar_path", server="torrent", default="")\ - .replace('/unrar', '').replace('\\unrar,exe', '')) - - import rarfile - - # Verificamos si hay path para UnRAR - rarfile.UNRAR_TOOL = config.get_setting("unrar_path", server="torrent", default="") - if not rarfile.UNRAR_TOOL: - if xbmc.getCondVisibility("system.platform.Android"): - rarfile.UNRAR_TOOL = xbmc.executebuiltin("StartAndroidActivity(com.rarlab.rar)") - return rar_file, False, '', '' - log("##### unrar_path: %s" % rarfile.UNRAR_TOOL) - rarfile.DEFAULT_CHARSET = 'utf-8' - - # Preparamos un path alternativo más corto para no sobrepasar la longitud máxima - video_path = '' - if item: - if 
item.contentType == 'movie': - video_path = '%s-%s' % (item.contentTitle, item.infoLabels['tmdb_id']) - else: - video_path = '%s-%sx%s-%s' % (item.contentSerieName, item.contentSeason, \ - item.contentEpisodeNumber, item.infoLabels['tmdb_id']) - video_path = video_path.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o")\ - .replace("ú", "u").replace("ü", "u").replace("ñ", "n")\ - .replace("Á", "A").replace("É", "E").replace("Í", "I").replace("Ó", "O")\ - .replace("Ú", "U").replace("Ü", "U").replace("Ñ", "N") - - # Renombramos el path dejado en la descarga a uno más corto - rename_status = False - org_rar_file = rar_file - org_save_path_videos = save_path_videos - if video_path and '/' in rar_file: - log("##### rar_file: %s" % rar_file) - rename_status, rar_file = rename_rar_dir(org_rar_file, org_save_path_videos, video_path, torr_client) - - # Calculamos el path para del RAR - if "/" in rar_file: - folders = rar_file.split("/") - erase_file_path = filetools.join(save_path_videos, folders[0]) - file_path = save_path_videos - for f in folders: - file_path = filetools.join(file_path, f) - else: - file_path = save_path_videos - erase_file_path = save_path_videos - - # Calculamos el path para la extracción - if "/" in rar_file: - folders = rar_file.split("/") - for f in folders: - if not '.rar' in f: - save_path_videos = filetools.join(save_path_videos, f) - save_path_videos = filetools.join(save_path_videos, 'Extracted') - if not filetools.exists(save_path_videos): filetools.mkdir(save_path_videos) - log("##### save_path_videos: %s" % save_path_videos) - - rar_control = update_rar_control(erase_file_path, status='UnRARing') - - # Permite hasta 5 pasadas de extracción de .RARs anidados - platformtools.dialog_notification(config.get_localized_string(70793), rar_file, time=5000) - for x in range(5): - try: - if not PY3: - archive = rarfile.RarFile(file_path.decode("utf8")) - else: - archive = rarfile.RarFile(file_path) - except: - log("##### ERROR in 
rar archive: %s" % rar_file) - log("##### ERROR in rar folder: %s" % file_path) - log(traceback.format_exc()) - error_msg = config.get_localized_string(70796) - error_msg1 = config.get_localized_string(60015) - platformtools.dialog_notification(error_msg, error_msg1) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg, status='ERROR') - return rar_file, False, '', '' - - # Analizamos si es necesaria una contraseña, que debería estar en item.password - if archive.needs_password(): - if not password: - pass_path = filetools.split(file_path)[0] - password = last_password_search(pass_path, erase_file_path) - if not password : - password = platformtools.dialog_input(heading=config.get_localized_string(70794) % pass_path) - if not password: - error_msg = config.get_localized_string(60309) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg, status='ERROR') - dp.close() - return custom_code.reactivate_unrar(init=False, mute=False) - archive.setpassword(password) - log("##### Password rar: %s" % password) - - # Miramos el contenido del RAR a extraer - files = archive.infolist() - info = [] - for idx, i in enumerate(files): - if i.file_size == 0: - files.pop(idx) - continue - filename = i.filename - if "/" in filename: - filename = filename.rsplit("/", 1)[1] - - info.append("%s - %.2f MB" % (filename, i.file_size / 1048576.0)) - if info: - info.append(config.get_localized_string(70801)) - else: - error_msg = config.get_localized_string(70797) - error_msg1 = config.get_localized_string(70798) - platformtools.dialog_notification(error_msg, error_msg1) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg, status='ERROR') - dp.close() - return custom_code.reactivate_unrar(init=False, mute=False) - - # Seleccionamos extraer TODOS los archivos del RAR - #selection = xbmcgui.Dialog().select("Selecciona el fichero a extraer y reproducir", info) - selection = len(info) - 1 - if selection < 
0: - error_msg = config.get_localized_string(70797) - platformtools.dialog_notification(error_msg) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg, status='ERROR') - return rar_file, False, '', '' - else: - try: - log("##### RAR Extract INI #####") - if selection == len(info) - 1: - log("##### rar_file 1: %s" % file_path) - log("##### save_path_videos 1: %s" % save_path_videos) - dp.update(99, config.get_localized_string(70803), config.get_localized_string(70802)) - archive.extractall(save_path_videos) - else: - log("##### rar_file 2: %s" % file_path) - log("##### save_path_videos 2: %s" % save_path_videos) - dp.update(99, config.get_localized_string(70802), config.get_localized_string(70803) + " %s" % info[selection]) - archive.extract(files[selection], save_path_videos) - log("##### RAR Extract END #####") - except (rarfile.RarWrongPassword, rarfile.RarCRCError): - log(traceback.format_exc(1)) - error_msg = config.get_localized_string(70799) - error_msg1 = config.get_localized_string(60309) - platformtools.dialog_notification(error_msg, error_msg1) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg1, status='ERROR') - dp.close() - return custom_code.reactivate_unrar(init=False, mute=False) - except rarfile.BadRarFile: - log(traceback.format_exc(1)) - error_msg = config.get_localized_string(70799) - error_msg1 = config.get_localized_string(60800) - platformtools.dialog_notification(error_msg, error_msg1) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg1, status='ERROR') - #return rar_file, False, '', erase_file_path - dp.close() - return custom_code.reactivate_unrar(init=False, mute=False) - except: - log(traceback.format_exc(1)) - error_msg = config.get_localized_string(70799) - error_msg1 = config.get_localized_string(60015) - platformtools.dialog_notification(error_msg, error_msg1) - rar_control = update_rar_control(erase_file_path, error=True, 
error_msg=error_msg, status='ERROR') - dp.close() - return custom_code.reactivate_unrar(init=False, mute=False) - - extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', - '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', - '.mpe', '.mp4', '.ogg', '.wmv'] - - # Localizamos el path donde se ha dejado la extracción - folder = True - file_result = filetools.listdir(save_path_videos) - while folder: - for file_r in file_result: - if filetools.isdir(filetools.join(save_path_videos, file_r)): - file_result_alt = filetools.listdir(filetools.join(save_path_videos, file_r)) - if file_result_alt: - file_result = file_result_alt - save_path_videos = filetools.join(save_path_videos, file_r) - else: - folder = False - break - else: - folder = False - - # Si hay RARs anidados, ajustamos los paths para la siguiente pasada - if '.rar' in str(file_result): - for file_r in file_result: - if '.rar' in file_r: - rar_file = file_r - file_path = str(filetools.join(save_path_videos, rar_file)) - save_path_videos = filetools.join(save_path_videos, 'Extracted') - rar_control = update_rar_control(erase_file_path, newextract=(rar_file)) - if not filetools.exists(save_path_videos): filetools.mkdir(save_path_videos) - platformtools.dialog_notification(config.get_localized_string(70804), rar_file, time=5000) - - # Si ya se ha extraido todo, preparamos el retorno - else: - video_list = [] - for file_r in file_result: - if os.path.splitext(file_r)[1] in extensions_list: - video_list += [file_r] - if len(video_list) == 0: - error_msg = config.get_localized_string(70797) - error_msg1 = config.get_localized_string(70798) - platformtools.dialog_notification(error_msg, error_msg1) - rar_control = update_rar_control(erase_file_path, error=True, error_msg=error_msg, status='ERROR') - dp.close() - return custom_code.reactivate_unrar(init=False, mute=False) - else: - log("##### Archive extracted: %s" % video_list[0]) - platformtools.dialog_notification(config.get_localized_string(70795), 
video_list[0], time=10000) - log("##### Archive removes: %s" % file_path) - #rar_control = update_rar_control(erase_file_path, status='DONE') - ret = filetools.remove(filetools.join(erase_file_path, '_rar_control.json'), silent=True) - return str(video_list[0]), True, save_path_videos, erase_file_path - - -def rename_rar_dir(rar_file, save_path_videos, video_path, torr_client): - logger.info() - - rename_status = False - folders = rar_file.split("/") - if filetools.exists(filetools.join(save_path_videos, folders[0])) and video_path not in folders[0]: - if not PY3: - src = filetools.join(save_path_videos, folders[0]).decode("utf8") - dst = filetools.join(save_path_videos, video_path).decode("utf8") - dst_file = video_path.decode("utf8") - else: - src = filetools.join(save_path_videos, folders[0]) - dst = filetools.join(save_path_videos, video_path) - dst_file = video_path - - for x in range(20): - if xbmc.abortRequested: - return rename_status, rar_file - xbmc.sleep(1000) - - # Se para la actividad para que libere los archivos descargados - if torr_client in ['quasar', 'elementum']: - torr_data, deamon_url, index = get_tclient_data(folders[0], torr_client) - if torr_data and deamon_url: - log("##### Client URL: %s" % '%spause/%s' % (deamon_url, index)) - data = httptools.downloadpage('%spause/%s' % (deamon_url, index), timeout=5, alfa_s=True).data - - try: - if filetools.exists(src): - filetools.rename(src, dst_file, silent=True, strict=True) - elif not filetools.exists(dst_file): - break - except: - log("##### Rename ERROR: SRC: %s" % src) - log(traceback.format_exc(1)) - else: - if filetools.exists(dst): - log("##### Renamed: SRC: %s" % src) - log("##### TO: DST: %s" % dst) - rar_file = video_path + '/' + folders[1] - rename_status = True - update_rar_control(dst, newpath=dst) - break - - return rename_status, rar_file - - -def last_password_search(pass_path, erase_file_path=''): - logger.info(pass_path) - - if not erase_file_path: - erase_file_path = pass_path - 
- # Busca en el Path de extracción si hay algún archivo que contenga la URL donde pueda estar la CONTRASEÑA - password = '' - patron_url = '(http.*\:\/\/(?:www.)?\w+\.\w+\/.*?)[\n|\r|$]' - patron_pass = '<input\s*type="text"\s*id="txt_password"\s*name="[^"]+"\s*onClick="[^"]+"\s*value="([^"]+)"' - - try: - pass_path_list = filetools.listdir(pass_path) - for file in pass_path_list: - if 'contrase' in file.lower() and '.rar' not in file: - file_pass = filetools.read(filetools.join(pass_path, file)) - url = scrapertools.find_single_match(file_pass, patron_url) - if url: - data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url).data) - password = scrapertools.find_single_match(data, patron_pass) - if password: - update_rar_control(erase_file_path, password=password, status='UnRARing: Password update') - break - except: - log(traceback.format_exc(1)) - - log("##### Password Extracted: %s" % password) - return password - - -def update_rar_control(path, newpath='', newextract='', password='', error='', error_msg='', status=''): - - try: - rar_control = {} - rar_control = jsontools.load(filetools.read(filetools.join(path, '_rar_control.json'))) - if rar_control: - if newpath: - rar_control['download_path'] = newpath - for x, entry in enumerate(rar_control['rar_files']): - if '__name' in entry: - rar_control['rar_files'][x]['__name'] = filetools.basename(newpath) - break - if newextract: - for x, entry in enumerate(rar_control['rar_files']): - if '__name' in entry: - #rar_control['rar_files'][x]['__name'] = filetools.join(rar_control['rar_files'][x]['__name'], 'Extracted') - rar_control['rar_files'][x]['__name'] = rar_control['rar_files'][x]['__name'] + '/Extracted' - break - rar_control['rar_names'] = [newextract] - if password: rar_control['password'] = password - if error: rar_control['error'] += 1 - if error_msg: rar_control['error_msg'] = error_msg - if status and status not in rar_control['status']: rar_control['status'] = status - ret = 
filetools.write(filetools.join(rar_control['download_path'], '_rar_control.json'), \ - jsontools.dump(rar_control)) - logger.debug('%s, %s, %s, %s, %s, %s' % (rar_control['download_path'], \ - rar_control['rar_names'][0], rar_control['password'], \ - str(rar_control['error']), rar_control['error_msg'], rar_control['status'])) - except: - log(traceback.format_exc(1)) - - return rar_control - - -def import_libtorrent(LIBTORRENT_PATH): - logger.info(LIBTORRENT_PATH) - - e = '' - e1 = '' - e2 = '' - fp = '' - pathname = '' - description = '' - lt = '' - - try: - sys.path.insert(0, LIBTORRENT_PATH) - if LIBTORRENT_PATH: - try: - if not xbmc.getCondVisibility("system.platform.android"): - import libtorrent as lt - pathname = LIBTORRENT_PATH - else: - import imp - from ctypes import CDLL - dll_path = os.path.join(LIBTORRENT_PATH, 'liblibtorrent.so') - liblibtorrent = CDLL(dll_path) - - path_list = [LIBTORRENT_PATH, xbmc.translatePath('special://xbmc')] - fp, pathname, description = imp.find_module('libtorrent', path_list) - - # Esta parte no funciona en Android. Por algún motivo da el error "dlopen failed: library "liblibtorrent.so" not found" - # Hay que encontrar un hack para rodear el problema. 
Lo siguiente ha sido probado sin éxito: - #if fp: fp.close() - #fp = filetools.file_open(filetools.join(LIBTORRENT_PATH, 'libtorrent.so'), mode='rb') # Usa XbmcVFS - #fp = open(os.path.join(LIBTORRENT_PATH, 'libtorrent.so'), 'rb') - - try: - lt = imp.load_module('libtorrent', fp, pathname, description) - finally: - if fp: fp.close() - - except Exception as e1: - logger.error(traceback.format_exc(1)) - log('fp = ' + str(fp)) - log('pathname = ' + str(pathname)) - log('description = ' + str(description)) - if fp: fp.close() - from lib.python_libtorrent.python_libtorrent import get_libtorrent - lt = get_libtorrent() - - except Exception as e2: - try: - logger.error(traceback.format_exc()) - if fp: fp.close() - e = e1 or e2 - ok = platformtools.dialog_ok(config.get_localized_string(30035), config.get_localized_string(30036), config.get_localized_string(60015), str(e2)) - except: - pass - - try: - if not e1 and e2: e1 = e2 - except: - try: - if e2: - e1 = e2 - else: - e1 = '' - e2 = '' - except: - e1 = '' - e2 = '' - - return lt, e, e1, e2 - - -def log(texto): - try: - xbmc.log(texto, xbmc.LOGNOTICE) - except: - pass + filename = filetools.split(filePath)[-1] + title = process_filename(filename, Title) + NewFolder = filetools.join(config.get_setting('downloadpath'), FolderName) + if not filetools.isdir(NewFolder): + filetools.mkdir(NewFolder) + from_folder = filetools.join(config.get_setting('downloadpath'), filename) + to_folder = filetools.join(config.get_setting('downloadpath'), FolderName, title) + filetools.move(from_folder, to_folder) + jsontools.update_node(filetools.join(FolderName, title), File, 'downloadFilename', jsonPath) \ No newline at end of file diff --git a/servers/upstream.json b/servers/upstream.json index d77be9eb..ee3e9685 100644 --- a/servers/upstream.json +++ b/servers/upstream.json @@ -4,8 +4,8 @@ "ignore_urls": [], "patterns": [ { - "pattern": "upstream.to/([a-z0-9]+)", - "url": "https://upstream.to/\\1" + "pattern": 
"upstream.to/((?:embed-)?[a-z0-9]+)", + "url": "https://upstream.to/\\1.html" } ] }, diff --git a/servers/vupplayer.json b/servers/vupplayer.json index 8823c477..d0b39cdf 100644 --- a/servers/vupplayer.json +++ b/servers/vupplayer.json @@ -4,14 +4,14 @@ "ignore_urls": [], "patterns": [ { - "pattern": "vup.to/(?:embed-)?([a-z0-9]+).html", + "pattern": "vup.to/((?:embed-)?[a-z0-9]+)", "url": "https://vup.to/\\1.html" } ] }, "free": true, "id": "vupplayer", - "name": "VUP Player", + "name": "VUP", "settings": [ { "default": false, diff --git a/service.py b/service.py index 667d6058..b9bf33a9 100644 --- a/service.py +++ b/service.py @@ -1,434 +1,402 @@ -# -*- coding: utf-8 -*- -# ------------------------------------------------------------ -# Service for updating new episodes on library series -# ------------------------------------------------------------ - -import datetime, imp, math, threading, traceback, sys, glob - -from platformcode import config -try: - import xbmc, os - librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib')) - sys.path.append(librerias) -except: - import os - librerias = os.path.join(config.get_runtime_path(), 'lib') - sys.path.append(librerias) - - - - -from core import channeltools, filetools, videolibrarytools -from platformcode import logger -from platformcode import platformtools -from specials import videolibrary -from platformcode import updater - - -def update(path, p_dialog, i, t, serie, overwrite): - logger.info("Updating " + path) - insertados_total = 0 - - head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo') - # videolibrarytools.check_renumber_options(it) - videolibrarytools.update_renumber_options(it, head_nfo, path) - category = serie.category - - # logger.debug("%s: %s" %(serie.contentSerieName,str(list_canales) )) - for channel, url in serie.library_urls.items(): - serie.channel = channel - serie.url = url - - ###### Redirección al canal NewPct1.py si es un clone, o a otro canal y url si ha 
intervención judicial - try: - head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo') #Refresca el .nfo para recoger actualizaciones - if it.emergency_urls: - serie.emergency_urls = it.emergency_urls - serie.category = category - except: - logger.error(traceback.format_exc()) - - channel_enabled = channeltools.is_enabled(serie.channel) - - if channel_enabled: - - heading = config.get_localized_string(20000) - p_dialog.update(int(math.ceil((i + 1) * t)), heading, config.get_localized_string(60389) % (serie.contentSerieName, - serie.channel.capitalize())) - try: - pathchannels = filetools.join(config.get_runtime_path(), "channels", serie.channel + '.py') - logger.info("Cargando canal: " + pathchannels + " " + - serie.channel) - - if serie.library_filter_show: - serie.show = serie.library_filter_show.get(serie.channel, serie.contentSerieName) - - obj = imp.load_source(serie.channel, pathchannels) - itemlist = obj.episodios(serie) - - try: - if int(overwrite) == 3: - # Sobrescribir todos los archivos (tvshow.nfo, 1x01.nfo, 1x01 [canal].json, 1x01.strm, etc...) - insertados, sobreescritos, fallidos, notusedpath = videolibrarytools.save_tvshow(serie, itemlist) - #serie= videolibrary.check_season_playcount(serie, serie.contentSeason) - #if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()): - # serie.infoLabels['playcount'] = serie.playcount - else: - insertados, sobreescritos, fallidos = videolibrarytools.save_episodes(path, itemlist, serie, - silent=True, - overwrite=overwrite) - #it = videolibrary.check_season_playcount(it, it.contentSeason) - #if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()): - # serie.infoLabels['playcount'] = serie.playcount - insertados_total += insertados - - except Exception as ex: - logger.error("Error al guardar los capitulos de la serie") - template = "An exception of type %s occured. 
Arguments:\n%r" - message = template % (type(ex).__name__, ex.args) - logger.error(message) - - except Exception as ex: - logger.error("Error al obtener los episodios de: %s" % serie.show) - template = "An exception of type %s occured. Arguments:\n%r" - message = template % (type(ex).__name__, ex.args) - logger.error(message) - - else: - logger.debug("Canal %s no activo no se actualiza" % serie.channel) - - #Sincronizamos los episodios vistos desde la videoteca de Kodi con la de Alfa - try: - if config.is_xbmc(): #Si es Kodi, lo hacemos - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo') - except: - logger.error(traceback.format_exc()) - - return insertados_total > 0 - - -def check_for_update(overwrite=True): - logger.info("Update Series...") - p_dialog = None - serie_actualizada = False - update_when_finished = False - hoy = datetime.date.today() - estado_verify_playcount_series = False - - try: - if config.get_setting("update", "videolibrary") != 0 or overwrite: - config.set_setting("updatelibrary_last_check", hoy.strftime('%Y-%m-%d'), "videolibrary") - - heading = config.get_localized_string(60389) - p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), heading) - p_dialog.update(0, '') - show_list = [] - - for path, folders, files in filetools.walk(videolibrarytools.TVSHOWS_PATH): - show_list.extend([filetools.join(path, f) for f in files if f == "tvshow.nfo"]) - - if show_list: - t = float(100) / len(show_list) - - for i, tvshow_file in enumerate(show_list): - head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) - path = filetools.dirname(tvshow_file) - - logger.info("serie=" + serie.contentSerieName) - p_dialog.update(int(math.ceil((i + 1) * t)), heading, serie.contentSerieName) - - #Verificamos el estado del serie.library_playcounts de la Serie por si está incompleto - try: - estado = False - #Si no hemos hecho la verificación o no tiene playcount, entramos - 
estado = config.get_setting("verify_playcount", "videolibrary") - if not estado or estado == False or not serie.library_playcounts: #Si no se ha pasado antes, lo hacemos ahora - serie, estado = videolibrary.verify_playcount_series(serie, path) #También se pasa si falta un PlayCount por completo - except: - logger.error(traceback.format_exc()) - else: - if estado: #Si ha tenido éxito la actualización... - estado_verify_playcount_series = True #... se marca para cambiar la opción de la Videoteca - - interval = int(serie.active) # Podria ser del tipo bool - - if not serie.active: - # si la serie no esta activa descartar - if not overwrite: - #Sincronizamos los episodios vistos desde la videoteca de Kodi con la de Alfa, aunque la serie esté desactivada - try: - if config.is_xbmc(): #Si es Kodi, lo hacemos - from platformcode import xbmc_videolibrary - xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo') - except: - logger.error(traceback.format_exc()) - - continue - - # obtenemos las fecha de actualizacion y de la proxima programada para esta serie - update_next = serie.update_next - if update_next: - y, m, d = update_next.split('-') - update_next = datetime.date(int(y), int(m), int(d)) - else: - update_next = hoy - - update_last = serie.update_last - if update_last: - y, m, d = update_last.split('-') - update_last = datetime.date(int(y), int(m), int(d)) - else: - update_last = hoy - - # si la serie esta activa ... - if overwrite or config.get_setting("updatetvshows_interval", "videolibrary") == 0: - # ... 
forzar actualizacion independientemente del intervalo - serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) - if not serie_actualizada: - update_next = hoy + datetime.timedelta(days=interval) - - elif interval == 1 and update_next <= hoy: - # ...actualizacion diaria - serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) - if not serie_actualizada and update_last <= hoy - datetime.timedelta(days=7): - # si hace una semana q no se actualiza, pasar el intervalo a semanal - interval = 7 - update_next = hoy + datetime.timedelta(days=interval) - - elif interval == 7 and update_next <= hoy: - # ...actualizacion semanal - serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) - if not serie_actualizada: - if update_last <= hoy - datetime.timedelta(days=14): - # si hace 2 semanas q no se actualiza, pasar el intervalo a mensual - interval = 30 - - update_next += datetime.timedelta(days=interval) - - elif interval == 30 and update_next <= hoy: - # ...actualizacion mensual - serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) - if not serie_actualizada: - update_next += datetime.timedelta(days=interval) - - if serie_actualizada: - update_last = hoy - update_next = hoy + datetime.timedelta(days=interval) - - head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) #Vuelve a leer el.nfo, que ha sido modificado - if interval != int(serie.active) or update_next.strftime('%Y-%m-%d') != serie.update_next or update_last.strftime('%Y-%m-%d') != serie.update_last: - serie.update_last = update_last.strftime('%Y-%m-%d') - if update_next > hoy: - serie.update_next = update_next.strftime('%Y-%m-%d') - serie.active = interval - serie.channel = "videolibrary" - serie.action = "get_seasons" - filetools.write(tvshow_file, head_nfo + serie.tojson()) - - if serie_actualizada: - if config.get_setting("search_new_content", "videolibrary") == 0: - # Actualizamos la videoteca de Kodi: Buscar contenido en la carpeta de la serie - if 
config.is_xbmc() and config.get_setting("videolibrary_kodi"): - from platformcode import xbmc_videolibrary - xbmc_videolibrary.update(folder=filetools.basename(path)) - else: - update_when_finished = True - - if estado_verify_playcount_series: #Si se ha cambiado algún playcount, ... - estado = config.set_setting("verify_playcount", True, "videolibrary") #... actualizamos la opción de Videolibrary - - if config.get_setting("search_new_content", "videolibrary") == 1 and update_when_finished: - # Actualizamos la videoteca de Kodi: Buscar contenido en todas las series - if config.is_xbmc() and config.get_setting("videolibrary_kodi"): - from platformcode import xbmc_videolibrary - xbmc_videolibrary.update() - - p_dialog.close() - - else: - logger.info("No actualiza la videoteca, está desactivado en la configuración de alfa") - - except Exception as ex: - logger.error("Se ha producido un error al actualizar las series") - template = "An exception of type %s occured. Arguments:\n%r" - message = template % (type(ex).__name__, ex.args) - logger.error(message) - - if p_dialog: - p_dialog.close() - - from core.item import Item - item_dummy = Item() - videolibrary.list_movies(item_dummy, silent=True) - - -def start(thread=True): - if thread: - t = threading.Thread(target=start, args=[False]) - t.setDaemon(True) - t.start() - else: - import time - - update_wait = [0, 10000, 20000, 30000, 60000] - wait = update_wait[int(config.get_setting("update_wait", "videolibrary"))] - if wait > 0: - time.sleep(wait) - - if not config.get_setting("update", "videolibrary") == 2: - check_for_update(overwrite=False) - - # Se ejecuta ciclicamente - while True: - monitor_update() - time.sleep(3600) # cada hora - - -def monitor_update(): - update_setting = config.get_setting("update", "videolibrary") - - # "Actualizar "Una sola vez al dia" o "al inicar Kodi y al menos una vez al dia" - - if update_setting == 2 or update_setting == 3: - hoy = datetime.date.today() - last_check = 
config.get_setting("updatelibrary_last_check", "videolibrary") - if last_check: - y, m, d = last_check.split('-') - last_check = datetime.date(int(y), int(m), int(d)) - else: - last_check = hoy - datetime.timedelta(days=1) - - update_start = config.get_setting("everyday_delay", "videolibrary") * 4 - - # logger.info("Ultima comprobacion: %s || Fecha de hoy:%s || Hora actual: %s" % - # (last_check, hoy, datetime.datetime.now().hour)) - # logger.info("Atraso del inicio del dia: %i:00" % update_start) - - if last_check <= hoy and datetime.datetime.now().hour == int(update_start): - logger.info("Inicio actualizacion programada para las %s h.: %s" % (update_start, datetime.datetime.now())) - check_for_update(overwrite=False) - - if not config.dev_mode(): - period = float(config.get_setting('addon_update_timer')) * 3600 - curTime = time.time() - lastCheck = config.get_setting("updater_last_check", "videolibrary", '0') - if lastCheck: - lastCheck = float(lastCheck) - else: - lastCheck = 0 - - if curTime - lastCheck > period: - updated, needsReload = updater.check(background=True) - config.set_setting("updater_last_check", str(curTime), "videolibrary") - if needsReload: - xbmc.executescript(__file__) - exit(0) - -# def get_channel_json(): -# import urllib, os, xbmc -# addon = config.get_addon_core() -# ROOT_DIR = config.get_runtime_path() -# LOCAL_FILE = os.path.join(ROOT_DIR, "channels.json") -# -# if os.path.exists(LOCAL_FILE): -# os.remove(LOCAL_FILE) -# urllib.urlretrieve("https://raw.githubusercontent.com/kodiondemand/addon/master/channels.json", LOCAL_FILE) -# -# if addon.getSetting("use_custom_url") != "true": -# channels_path = os.path.join(ROOT_DIR, "channels", '*.json') -# channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x)) -# for channel_file in channel_files: -# if channel_file: -# try: import json -# except: import simplejson as json -# with open(LOCAL_FILE) as f: -# data = json.load(f) -# try: -# if data[channel_file]: -# 
config.set_setting(name=data[channel_file], value="value", channel=channel_file) -# except: pass #channel not in json - -# always bypass al websites that use cloudflare at startup, so there's no need to wait 5 seconds when opened -def callCloudflare(): - from core import httptools, support - import json - channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') - channel_files = [os.path.splitext(os.path.basename(c))[0] for c in glob.glob(channels_path)] - for channel_name in channel_files: - channel_parameters = channeltools.get_channel_parameters(channel_name) - if 'cloudflare' in channel_parameters and channel_parameters["cloudflare"]: - channel = __import__('channels.%s' % channel_name, fromlist=["channels.%s" % channel_name]) - try: - channel.findhost() - except: - pass - httptools.downloadpage(channel.host) - - servers_path = os.path.join(config.get_runtime_path(), "servers", '*.json') - servers_files = glob.glob(servers_path) - for server in servers_files: - with open(server) as server: - server_parameters = json.load(server) - if 'cloudflare' in server_parameters and server_parameters["cloudflare"]: - patternUrl = server_parameters["find_videos"]["patterns"][0]["url"] - url = '/'.join(patternUrl.split('/')[:3]) - httptools.downloadpage(url) - - -if __name__ == "__main__": - # threading.Thread(target=callCloudflare()) - # Se ejecuta en cada inicio - import xbmc - import time - - # mark as stopped all downloads (if we are here, probably kodi just started) - from specials.downloads import stop_all - stop_all() - - # modo adulto: - # sistema actual 0: Nunca, 1:Siempre, 2:Solo hasta que se reinicie Kodi - # si es == 2 lo desactivamos. 
- if config.get_setting("adult_mode") == 2: - config.set_setting("adult_mode", 0) - - update_wait = [0, 10000, 20000, 30000, 60000] - wait = update_wait[int(config.get_setting("update_wait", "videolibrary"))] - if wait > 0: - xbmc.sleep(wait) - - - # Verificar quick-fixes al abrirse Kodi, y dejarlo corriendo como Thread - if not config.dev_mode(): - updated, needsReload = updater.check(background=True) - config.set_setting("updater_last_check", str(time.time()), "videolibrary") - if needsReload: - xbmc.executescript(__file__) - exit(0) - - if xbmc.getCondVisibility('System.HasAddon(repository.kod)'): - filetools.rmdirtree(xbmc.translatePath('special://home/addons/repository.kod')) - - # Copia Custom code a las carpetas de Alfa desde la zona de Userdata - from platformcode import custom_code - custom_code.init() - - if not config.get_setting("update", "videolibrary") == 2: - check_for_update(overwrite=False) - - - # Se ejecuta ciclicamente - if config.get_platform(True)['num_version'] >= 14: - monitor = xbmc.Monitor() # For Kodi >= 14 - else: - monitor = None # For Kodi < 14 - - if monitor: - while not monitor.abortRequested(): - monitor_update() - if monitor.waitForAbort(3600): # cada hora - break - else: - while not xbmc.abortRequested: - monitor_update() - xbmc.sleep(3600) +# -*- coding: utf-8 -*- +import datetime +import math +import os +import sys +import threading +import traceback +import xbmc +import xbmcgui +from platformcode import config +librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib')) +sys.path.insert(0, librerias) + +from core import videolibrarytools, filetools, channeltools +from lib import schedule +from platformcode import logger, platformtools, updater +from specials import videolibrary +from servers import torrent + + +def update(path, p_dialog, i, t, serie, overwrite): + logger.info("Updating " + path) + insertados_total = 0 + + head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo') + # 
videolibrarytools.check_renumber_options(it) + videolibrarytools.update_renumber_options(it, head_nfo, path) + category = serie.category + + # logger.debug("%s: %s" %(serie.contentSerieName,str(list_canales) )) + for channel, url in serie.library_urls.items(): + serie.channel = channel + serie.url = url + + ###### Redirección al canal NewPct1.py si es un clone, o a otro canal y url si ha intervención judicial + try: + head_nfo, it = videolibrarytools.read_nfo(path + '/tvshow.nfo') #Refresca el .nfo para recoger actualizaciones + if it.emergency_urls: + serie.emergency_urls = it.emergency_urls + serie.category = category + except: + logger.error(traceback.format_exc()) + + channel_enabled = channeltools.is_enabled(serie.channel) + + if channel_enabled: + + heading = config.get_localized_string(20000) + p_dialog.update(int(math.ceil((i + 1) * t)), heading, config.get_localized_string(60389) % (serie.contentSerieName, + serie.channel.capitalize())) + try: + pathchannels = filetools.join(config.get_runtime_path(), "channels", serie.channel + '.py') + logger.info("loading channel: " + pathchannels + " " + + serie.channel) + + if serie.library_filter_show: + serie.show = serie.library_filter_show.get(serie.channel, serie.contentSerieName) + + obj = __import__('channels.%s' % serie.channel, fromlist=[pathchannels]) + + itemlist = obj.episodios(serie) + + try: + if int(overwrite) == 3: + # Sobrescribir todos los archivos (tvshow.nfo, 1x01.nfo, 1x01 [canal].json, 1x01.strm, etc...) 
+ insertados, sobreescritos, fallidos, notusedpath = videolibrarytools.save_tvshow(serie, itemlist) + #serie= videolibrary.check_season_playcount(serie, serie.contentSeason) + #if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()): + # serie.infoLabels['playcount'] = serie.playcount + else: + insertados, sobreescritos, fallidos = videolibrarytools.save_episodes(path, itemlist, serie, + silent=True, + overwrite=overwrite) + #it = videolibrary.check_season_playcount(it, it.contentSeason) + #if filetools.write(path + '/tvshow.nfo', head_nfo + it.tojson()): + # serie.infoLabels['playcount'] = serie.playcount + insertados_total += insertados + + except Exception as ex: + logger.error("Error when saving the chapters of the series") + template = "An exception of type %s occured. Arguments:\n%r" + message = template % (type(ex).__name__, ex.args) + logger.error(message) + + except Exception as ex: + logger.error("Error in obtaining the episodes of: %s" % serie.show) + template = "An exception of type %s occured. 
Arguments:\n%r" + message = template % (type(ex).__name__, ex.args) + logger.error(message) + + else: + logger.debug("Channel %s not active is not updated" % serie.channel) + + #Sincronizamos los episodios vistos desde la videoteca de Kodi con la de Alfa + try: + if config.is_xbmc(): #Si es Kodi, lo hacemos + from platformcode import xbmc_videolibrary + xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo') + except: + logger.error(traceback.format_exc()) + + return insertados_total > 0 + + +def check_for_update(overwrite=True): + logger.info("Update Series...") + p_dialog = None + serie_actualizada = False + update_when_finished = False + hoy = datetime.date.today() + estado_verify_playcount_series = False + + try: + if config.get_setting("update", "videolibrary") != 0 or overwrite: + config.set_setting("updatelibrary_last_check", hoy.strftime('%Y-%m-%d'), "videolibrary") + + heading = config.get_localized_string(60389) + p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), heading) + p_dialog.update(0, '') + show_list = [] + + for path, folders, files in filetools.walk(videolibrarytools.TVSHOWS_PATH): + show_list.extend([filetools.join(path, f) for f in files if f == "tvshow.nfo"]) + + if show_list: + t = float(100) / len(show_list) + + for i, tvshow_file in enumerate(show_list): + head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) + path = filetools.dirname(tvshow_file) + + logger.info("serie=" + serie.contentSerieName) + p_dialog.update(int(math.ceil((i + 1) * t)), heading, serie.contentSerieName) + + #Verificamos el estado del serie.library_playcounts de la Serie por si está incompleto + try: + estado = False + #Si no hemos hecho la verificación o no tiene playcount, entramos + estado = config.get_setting("verify_playcount", "videolibrary") + if not estado or estado == False or not serie.library_playcounts: #Si no se ha pasado antes, lo hacemos ahora + serie, estado = 
videolibrary.verify_playcount_series(serie, path) #También se pasa si falta un PlayCount por completo + except: + logger.error(traceback.format_exc()) + else: + if estado: #Si ha tenido éxito la actualización... + estado_verify_playcount_series = True #... se marca para cambiar la opción de la Videoteca + + interval = int(serie.active) # Podria ser del tipo bool + + if not serie.active: + # si la serie no esta activa descartar + if not overwrite: + #Sincronizamos los episodios vistos desde la videoteca de Kodi con la de Alfa, aunque la serie esté desactivada + try: + if config.is_xbmc(): #Si es Kodi, lo hacemos + from platformcode import xbmc_videolibrary + xbmc_videolibrary.mark_content_as_watched_on_alfa(path + '/tvshow.nfo') + except: + logger.error(traceback.format_exc()) + + continue + + # obtenemos las fecha de actualizacion y de la proxima programada para esta serie + update_next = serie.update_next + if update_next: + y, m, d = update_next.split('-') + update_next = datetime.date(int(y), int(m), int(d)) + else: + update_next = hoy + + update_last = serie.update_last + if update_last: + y, m, d = update_last.split('-') + update_last = datetime.date(int(y), int(m), int(d)) + else: + update_last = hoy + + # si la serie esta activa ... + if overwrite or config.get_setting("updatetvshows_interval", "videolibrary") == 0: + # ... 
forzar actualizacion independientemente del intervalo + serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) + if not serie_actualizada: + update_next = hoy + datetime.timedelta(days=interval) + + elif interval == 1 and update_next <= hoy: + # ...actualizacion diaria + serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) + if not serie_actualizada and update_last <= hoy - datetime.timedelta(days=7): + # si hace una semana q no se actualiza, pasar el intervalo a semanal + interval = 7 + update_next = hoy + datetime.timedelta(days=interval) + + elif interval == 7 and update_next <= hoy: + # ...actualizacion semanal + serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) + if not serie_actualizada: + if update_last <= hoy - datetime.timedelta(days=14): + # si hace 2 semanas q no se actualiza, pasar el intervalo a mensual + interval = 30 + + update_next += datetime.timedelta(days=interval) + + elif interval == 30 and update_next <= hoy: + # ...actualizacion mensual + serie_actualizada = update(path, p_dialog, i, t, serie, overwrite) + if not serie_actualizada: + update_next += datetime.timedelta(days=interval) + + if serie_actualizada: + update_last = hoy + update_next = hoy + datetime.timedelta(days=interval) + + head_nfo, serie = videolibrarytools.read_nfo(tvshow_file) #Vuelve a leer el.nfo, que ha sido modificado + if interval != int(serie.active) or update_next.strftime('%Y-%m-%d') != serie.update_next or update_last.strftime('%Y-%m-%d') != serie.update_last: + serie.update_last = update_last.strftime('%Y-%m-%d') + if update_next > hoy: + serie.update_next = update_next.strftime('%Y-%m-%d') + serie.active = interval + serie.channel = "videolibrary" + serie.action = "get_seasons" + filetools.write(tvshow_file, head_nfo + serie.tojson()) + + if serie_actualizada: + if config.get_setting("search_new_content", "videolibrary") == 0: + # Actualizamos la videoteca de Kodi: Buscar contenido en la carpeta de la serie + if 
config.is_xbmc() and config.get_setting("videolibrary_kodi"): + from platformcode import xbmc_videolibrary + xbmc_videolibrary.update(folder=filetools.basename(path)) + else: + update_when_finished = True + + if estado_verify_playcount_series: #Si se ha cambiado algún playcount, ... + estado = config.set_setting("verify_playcount", True, "videolibrary") #... actualizamos la opción de Videolibrary + + if config.get_setting("search_new_content", "videolibrary") == 1 and update_when_finished: + # Actualizamos la videoteca de Kodi: Buscar contenido en todas las series + if config.is_xbmc() and config.get_setting("videolibrary_kodi"): + from platformcode import xbmc_videolibrary + xbmc_videolibrary.update() + + p_dialog.close() + + else: + logger.info("Not update the video library, it is disabled") + + except Exception as ex: + logger.error("An error occurred while updating the series") + template = "An exception of type %s occured. Arguments:\n%r" + message = template % (type(ex).__name__, ex.args) + logger.error(message) + + if p_dialog: + p_dialog.close() + + from core.item import Item + item_dummy = Item() + videolibrary.list_movies(item_dummy, silent=True) + + +def viewmodeMonitor(): + try: + currentModeName = xbmc.getInfoLabel('Container.Viewmode') + win = xbmcgui.Window(xbmcgui.getCurrentWindowId()) + currentMode = int(win.getFocusId()) + if currentModeName and 'plugin.video.kod' in xbmc.getInfoLabel( + 'Container.FolderPath') and currentMode < 1000 and currentMode > 50: # inside addon and in itemlist view + content, Type = platformtools.getCurrentView() + if content: + defaultMode = int(config.get_setting('view_mode_%s' % content).split(',')[-1]) + if currentMode != defaultMode: + logger.info('viewmode changed: ' + currentModeName + '-' + str(currentMode) + ' - content: ' + content) + config.set_setting('view_mode_%s' % content, currentModeName + ', ' + str(currentMode)) + except: + logger.error(traceback.print_exc()) + + +def updaterCheck(): + # updater check + 
updated, needsReload = updater.check(background=True) + if needsReload: + xbmc.executescript(__file__) + exit(0) + + +def run_threaded(job_func, args): + job_thread = threading.Thread(target=job_func, args=args) + job_thread.start() + + +class AddonMonitor(xbmc.Monitor): + def __init__(self): + self.settings_pre = config.get_all_settings_addon() + + self.updaterPeriod = None + self.update_setting = None + self.update_hour = None + self.scheduleScreenOnJobs() + self.scheduleUpdater() + + # videolibrary wait + update_wait = [0, 10000, 20000, 30000, 60000] + wait = update_wait[int(config.get_setting("update_wait", "videolibrary"))] + if wait > 0: + xbmc.sleep(wait) + if not config.get_setting("update", "videolibrary") == 2: + run_threaded(check_for_update, (False,)) + self.scheduleVideolibrary() + super(AddonMonitor, self).__init__() + + def onSettingsChanged(self): + logger.info('settings changed') + settings_post = config.get_all_settings_addon() + from platformcode import xbmc_videolibrary + + if self.settings_pre.get('downloadpath', None) != settings_post.get('downloadpath', None): + xbmc_videolibrary.update_sources(settings_post.get('downloadpath', None), + self.settings_pre.get('downloadpath', None)) + + # si se ha cambiado la ruta de la videoteca llamamos a comprobar directorios para que lo cree y pregunte + # automaticamente si configurar la videoteca + if self.settings_pre.get("videolibrarypath", None) != settings_post.get("videolibrarypath", None) or \ + self.settings_pre.get("folder_movies", None) != settings_post.get("folder_movies", None) or \ + self.settings_pre.get("folder_tvshows", None) != settings_post.get("folder_tvshows", None): + videolibrary.move_videolibrary(self.settings_pre.get("videolibrarypath", None), + settings_post.get("videolibrarypath", None), + self.settings_pre.get("folder_movies", None), + settings_post.get("folder_movies", None), + self.settings_pre.get("folder_tvshows", None), + settings_post.get("folder_tvshows", None)) + + # si 
se ha puesto que se quiere autoconfigurar y se había creado el directorio de la videoteca + if not self.settings_pre.get("videolibrary_kodi", None) and settings_post.get("videolibrary_kodi", None): + xbmc_videolibrary.ask_set_content(silent=True) + elif self.settings_pre.get("videolibrary_kodi", None) and not settings_post.get("videolibrary_kodi", None): + xbmc_videolibrary.clean() + + if self.settings_pre.get('addon_update_timer') != settings_post.get('addon_update_timer'): + schedule.clear('updater') + self.scheduleUpdater() + + if self.update_setting != config.get_setting("update", "videolibrary") or self.update_hour != config.get_setting("everyday_delay", "videolibrary") * 4: + schedule.clear('videolibrary') + self.scheduleVideolibrary() + + if self.settings_pre.get('elementum_on_seed') != settings_post.get('elementum_on_seed') and settings_post.get('elementum_on_seed'): + if not platformtools.dialog_yesno(config.get_localized_string(70805), config.get_localized_string(70806)): + config.set_setting('elementum_on_seed', False) + + self.settings_pre = settings_post + + def onScreensaverActivated(self): + logger.info('screensaver activated, un-scheduling screen-on jobs') + schedule.clear('screenOn') + + def onScreensaverDeactivated(self): + logger.info('screensaver deactivated, re-scheduling screen-on jobs') + self.scheduleScreenOnJobs() + + def scheduleUpdater(self): + if not config.dev_mode(): + updaterCheck() + self.updaterPeriod = config.get_setting('addon_update_timer') + schedule.every(self.updaterPeriod).hours.do(updaterCheck).tag('updater') + logger.info('scheduled updater every ' + str(self.updaterPeriod) + ' hours') + + def scheduleVideolibrary(self): + self.update_setting = config.get_setting("update", "videolibrary") + # 2= daily 3=daily and when kodi starts + if self.update_setting == 2 or self.update_setting == 3: + self.update_hour = config.get_setting("everyday_delay", "videolibrary") * 4 + schedule.every().day.at(str(self.update_hour).zfill(2) + 
':00').do(run_threaded, check_for_update, (False,)).tag('videolibrary') + logger.info('scheduled videolibrary at ' + str(self.update_hour).zfill(2) + ':00') + + def scheduleScreenOnJobs(self): + schedule.every().second.do(viewmodeMonitor).tag('screenOn') + schedule.every().second.do(torrent.elementum_monitor).tag('screenOn') + + def onDPMSActivated(self): + logger.info('DPMS activated, un-scheduling screen-on jobs') + schedule.clear('screenOn') + + def onDPMSDeactivated(self): + logger.info('DPMS deactivated, re-scheduling screen-on jobs') + self.scheduleScreenOnJobs() + + +if __name__ == "__main__": + logger.info('Starting KoD service') + monitor = AddonMonitor() + + # mark as stopped all downloads (if we are here, probably kodi just started) + from specials.downloads import stop_all + try: + stop_all() + except: + logger.error(traceback.format_exc()) + + while True: + schedule.run_pending() + + if monitor.waitForAbort(1): # every second + break diff --git a/specials/autoplay.py b/specials/autoplay.py index 5d66b9f9..552e5be8 100644 --- a/specials/autoplay.py +++ b/specials/autoplay.py @@ -56,6 +56,7 @@ def show_option(channel, itemlist, text_color=colorKOD, thumbnail=None, fanart=N :return: ''' from channelselector import get_thumb + from core.support import typo logger.info() if not config.is_xbmc(): @@ -69,7 +70,7 @@ def show_option(channel, itemlist, text_color=colorKOD, thumbnail=None, fanart=N plot_autoplay = config.get_localized_string(60399) itemlist.append( Item(channel=__channel__, - title=config.get_localized_string(60071), + title=typo(config.get_localized_string(60071), 'bold color kod'), action="autoplay_config", text_color=text_color, text_bold=True, @@ -113,12 +114,12 @@ def start(itemlist, item): if item.channel == 'videolibrary': autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') channel_id = item.contentChannel - try: - active = autoplay_node['status'] - except: - active = is_active(item.channel) + # try: + # active = 
autoplay_node['status'] + # except: + # active = is_active(item.channel) - if not channel_id in autoplay_node or not active: + if not channel_id in autoplay_node: # or not active: return itemlist # Agrega servidores y calidades que no estaban listados a autoplay_node @@ -724,19 +725,40 @@ def reset(item, dict): return -def set_status(status): - logger.info() - # Obtiene el nodo AUTOPLAY desde el json - autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') - autoplay_node['status'] = status +# def set_status(status): +# logger.info() +# # Obtiene el nodo AUTOPLAY desde el json +# autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') +# autoplay_node['status'] = status +# +# result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY') - result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY') +# return if item channel has autoplay and hideserver enabled +def get_channel_AP_HS(item): + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + channel_node = autoplay_node.get(item.channel, {}) + if not channel_node: # non ha mai aperto il menu del canale quindi in autoplay_data.json non c'e la key + try: + channelFile = __import__('channels.' + item.channel, fromlist=["channels.%s" % item.channel]) + except: + channelFile = __import__('specials.' 
+ item.channel, fromlist=["specials.%s" % item.channel]) + if hasattr(channelFile, 'list_servers') and hasattr(channelFile, 'list_quality'): + init(item.channel, channelFile.list_servers, channelFile.list_quality) + + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + channel_node = autoplay_node.get(item.channel, {}) + settings_node = channel_node.get('settings', {}) + AP = get_setting('autoplay') or (settings_node['active'] if 'active' in settings_node else False) + HS = config.get_setting('hide_servers') or ( + settings_node['hide_servers'] if 'hide_server' in settings_node else False) + + return AP, HS def play_multi_channel(item, itemlist): logger.info() global PLAYED video_dict = dict() - set_status(True) + # set_status(True) for video_item in itemlist: if is_active(video_item.contentChannel): @@ -751,3 +773,6 @@ def play_multi_channel(item, itemlist): start(videos, item) else: break + + AP, HS = get_channel_AP_HS(item) + return HS \ No newline at end of file diff --git a/specials/backup.py b/specials/backup.py index d46b4a1e..5802ea1d 100644 --- a/specials/backup.py +++ b/specials/backup.py @@ -10,6 +10,7 @@ import xbmc from core import ziptools, videolibrarytools, filetools from platformcode import logger, config, platformtools, xbmc_videolibrary from distutils.dir_util import copy_tree +from specials import videolibrary temp_path = xbmc.translatePath("special://userdata/addon_data/plugin.video.kod/temp/") movies_path = os.path.join(temp_path, "movies") @@ -67,15 +68,14 @@ def import_videolibrary(item): unzipper = ziptools.ziptools() unzipper.extract(zip_file, temp_path) - p_dialog.update(25) + p_dialog.update(20) + if config.is_xbmc() and config.get_setting("videolibrary_kodi"): + xbmc_videolibrary.clean() + p_dialog.update(30) filetools.rmdirtree(videolibrarytools.MOVIES_PATH) filetools.rmdirtree(videolibrarytools.TVSHOWS_PATH) p_dialog.update(50) - if config.is_xbmc() and config.get_setting("videolibrary_kodi"): - strm_list = [] - 
strm_list.append(config.get_setting('videolibrarypath')) - xbmc_videolibrary.clean(strm_list) config.verify_directories_created() if filetools.exists(movies_path): @@ -91,9 +91,6 @@ def import_videolibrary(item): p_dialog.close() platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(80008), time=5000, sound=False) - if platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(80009)): - import service - service.check_for_update(overwrite=True) - + videolibrary.update_videolibrary() if config.is_xbmc() and config.get_setting("videolibrary_kodi"): - xbmc_videolibrary.update() + xbmc_videolibrary.update() \ No newline at end of file diff --git a/specials/checkhost.py b/specials/checkhost.py index 41f5efbe..5593aae3 100644 --- a/specials/checkhost.py +++ b/specials/checkhost.py @@ -20,7 +20,7 @@ LIST_SITE = ['http://www.ansa.it/', 'https://www.google.it']#, 'https://www.goog # lista di siti che non verranno raggiunti con i DNS del gestore -LST_SITE_CHCK_DNS = ['https://casacinema.space', 'https://documentari-streaming-da.com'] +LST_SITE_CHCK_DNS = ['https://www.casacinema.me/', 'https://cb01-nuovo-indirizzo.info/'] #'https://www.italia-film.pw', 'https://www.cb01.uno/',] # tolti class Kdicc(): @@ -224,25 +224,24 @@ def test_conn(is_exit, check_dns, view_msg, exit() # se ha i DNS filtrati lo comunico all'utente if check_dns == True: - if ktest.check_Dns(): - if not ktest.check_Dns(): - if view_msg == True: - ktest.view_Advise(config.get_localized_string(70722)) + if not ktest.check_Dns(): + if view_msg == True: + ktest.view_Advise(config.get_localized_string(70722)) xbmc.log("############ Inizio Check DNS ############", level=xbmc.LOGNOTICE) xbmc.log("## IP: %s" % (ktest.ip_addr), level=xbmc.LOGNOTICE) xbmc.log("## DNS: %s" % (ktest.dns), level=xbmc.LOGNOTICE) xbmc.log("############ Fine Check DNS ############", level=xbmc.LOGNOTICE) - if check_dns == True: - if ktest.check_Ip() == True and 
ktest.check_Adsl() == True and ktest.check_Dns() == True: - return True - else: - return False - else: - if ktest.check_Ip() == True and ktest.check_Adsl() == True: - return True - else: - return False + # if check_dns == True: + # if ktest.check_Ip() == True and ktest.check_Adsl() == True and ktest.check_Dns() == True: + # return True + # else: + # return False + # else: + # if ktest.check_Ip() == True and ktest.check_Adsl() == True: + # return True + # else: + # return False # def per la creazione del file channels.json def check_channels(inutile=''): diff --git a/specials/community.json b/specials/community.json index c72593c1..2302af6f 100644 --- a/specials/community.json +++ b/specials/community.json @@ -2,7 +2,6 @@ "id": "community", "name": "Community", "active": true, - "adult": false, "language": ["*"], "thumbnail": "", "banner": "", diff --git a/specials/downloads.json b/specials/downloads.json index 966eab98..1044ac8a 100644 --- a/specials/downloads.json +++ b/specials/downloads.json @@ -2,7 +2,6 @@ "id": "downloads", "name": "Descargas", "active": false, - "adult": false, "language": ["*"], "categories": [ "movie" diff --git a/specials/downloads.py b/specials/downloads.py index f7e9decd..ae6014ea 100644 --- a/specials/downloads.py +++ b/specials/downloads.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Gestor de descargas +# Download manager # ------------------------------------------------------------ from __future__ import division @@ -22,6 +22,8 @@ from core.downloader import Downloader from core.item import Item from platformcode import config, logger from platformcode import platformtools +from core.support import log, dbg, typo +from servers import torrent kb = '0xFF65B3DA' kg = '0xFF65DAA8' @@ -38,27 +40,28 @@ FOLDER_MOVIES = config.get_setting("folder_movies") FOLDER_TVSHOWS = config.get_setting("folder_tvshows") TITLE_FILE = "[COLOR %s]| %i%% |[/COLOR] - %s" TITLE_TVSHOW = "[COLOR 
%s]| %i%% |[/COLOR] - %s [%s]" +extensions_list = ['.aaf', '.3gp', '.asf', '.avi', '.flv', '.mpeg', '.m1v', '.m2v', '.m4v', '.mkv', '.mov', '.mpg', '.mpe', '.mp4', '.ogg', '.wmv'] def mainlist(item): - logger.info() + log() itemlist = [] - # Lista de archivos + # File list for file in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): - # Saltamos todos los que no sean JSON + # We skip all the non JSON if not file.endswith(".json"): continue - # cargamos el item + # we load the item file = filetools.join(DOWNLOAD_LIST_PATH, file) i = Item(path=file).fromjson(filetools.read(file)) i.thumbnail = i.contentThumbnail - # Listado principal + # Main listing if not item.contentType == "tvshow": # Series if i.contentType == "episode": - # Comprobamos que la serie no este ya en el itemlist + # We check that the series is not already in the itemlist if not [x for x in itemlist if x.contentSerieName == i.contentSerieName and x.contentChannel == i.contentChannel]: title = TITLE_TVSHOW % (STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentSerieName, i.contentChannel) @@ -79,12 +82,12 @@ def mainlist(item): s.title = TITLE_TVSHOW % (STATUS_COLORS[s.downloadStatus], downloadProgress, i.contentSerieName, i.contentChannel) - # Peliculas + # Movies elif i.contentType == "movie" or i.contentType == "video": i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentTitle) itemlist.append(i) - # Listado dentro de una serie + # Listed within a series else: if i.contentType == "episode" and i.contentSerieName == item.contentSerieName and i.contentChannel == item.contentChannel: i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus], i.downloadProgress, "%dx%0.2d: %s" % (i.contentSeason, i.contentEpisodeNumber, i.contentTitle)) @@ -92,19 +95,19 @@ def mainlist(item): estados = [i.downloadStatus for i in itemlist] - # Si hay alguno completado + # If there is any completed if 2 in estados: itemlist.insert(0, Item(channel=item.channel, action="clean_ready", 
title=config.get_localized_string(70218), contentType=item.contentType, contentChannel=item.contentChannel, thumbnail=get_thumb('delete.png'), contentSerieName=item.contentSerieName, text_color=STATUS_COLORS[STATUS_CODES.completed])) - # Si hay alguno con error + # If there is any error if 3 in estados: itemlist.insert(0, Item(channel=item.channel, action="restart_error", title=config.get_localized_string(70219), contentType=item.contentType, contentChannel=item.contentChannel, thumbnail=get_thumb('update.png'), contentSerieName=item.contentSerieName, text_color=STATUS_COLORS[STATUS_CODES.error])) - # Si hay alguno pendiente + # If there is any pending if 1 in estados or 0 in estados: itemlist.insert(0, Item(channel=item.channel, action="download_all", title=support.typo(config.get_localized_string(70220),'bold'), contentType=item.contentType, contentChannel=item.contentChannel, thumbnail=get_thumb('downloads.png'), @@ -126,7 +129,7 @@ def mainlist(item): itemlist.insert(0, Item(channel=item.channel, action="browser", title=support.typo(config.get_localized_string(70222),'bold'), thumbnail=get_thumb('search.png'), url=DOWNLOAD_PATH)) if not item.contentType == "tvshow": - itemlist.append(Item(channel='shortcuts', action="SettingOnPosition", category=4, setting=0, title= support.typo(config.get_localized_string(70288),'bold color kod'), thumbnail=get_thumb('setting_0.png'))) + itemlist.append(Item(channel='shortcuts', action="SettingOnPosition", category=6, setting=0, title= support.typo(config.get_localized_string(70288),'bold color kod'), thumbnail=get_thumb('setting_0.png'))) # Reload if estados: @@ -144,7 +147,7 @@ def settings(item): def browser(item): - logger.info() + log() itemlist = [] for file in filetools.listdir(item.url): @@ -152,7 +155,11 @@ def browser(item): if filetools.isdir(filetools.join(item.url, file)): itemlist.append(Item(channel=item.channel, title=file, action=item.action, url=filetools.join(item.url, file), context=[{ 'title': 
config.get_localized_string(30037), 'channel': 'downloads', 'action': "del_dir"}])) else: - itemlist.append(Item(channel=item.channel, title=file, action="play", url=filetools.join(item.url, file), context=[{ 'title': config.get_localized_string(30039), 'channel': 'downloads', 'action': "del_file"}])) + if not item.infoLabels: + infoLabels = {"mediatype":"video"} + else: + infoLabels = item.infoLabels + itemlist.append(Item(channel=item.channel, title=file, action="play", infoLabels=infoLabels, url=filetools.join(item.url, file), context=[{ 'title': config.get_localized_string(30039), 'channel': 'downloads', 'action': "del_file"}])) return itemlist @@ -174,17 +181,17 @@ def del_dir(item): def clean_all(item): - logger.info() + log() stop_all() removeFiles = False if platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(30300)): removeFiles = True - for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): - if fichero.endswith(".json"): - download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, fichero))) + for File in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): + if File.endswith(".json"): + download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, File))) if not item.contentType == "tvshow" or ( item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel): - filetools.remove(filetools.join(DOWNLOAD_LIST_PATH, fichero)) + filetools.remove(filetools.join(DOWNLOAD_LIST_PATH, File)) if removeFiles: filetools.remove(filetools.join(DOWNLOAD_PATH, download_item.downloadFilename)) dirName = filetools.join(DOWNLOAD_PATH, filetools.dirname(download_item.downloadFilename)) @@ -200,11 +207,16 @@ def reload(item): def stop_all(item=None): - logger.info() + log() for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): if fichero.endswith(".json"): download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, 
fichero))) + if download_item.TorrentName: + from inspect import stack + if stack()[1][3] == 'clean_all': action = 'delete' + else: action = 'pause' + torrent.elementum_actions(action, download_item.TorrentName) if download_item.downloadStatus == 4: update_json(filetools.join(DOWNLOAD_LIST_PATH, fichero), {"downloadStatus": STATUS_CODES.stoped}) xbmc.sleep(300) @@ -213,7 +225,7 @@ def stop_all(item=None): def clean_ready(item): - logger.info() + log() for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): if fichero.endswith(".json"): download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, fichero))) @@ -225,7 +237,7 @@ def clean_ready(item): def restart_error(item): - logger.info() + log() for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): if fichero.endswith(".json"): download_item = Item().fromjson(filetools.read(filetools.join(DOWNLOAD_LIST_PATH, fichero))) @@ -237,8 +249,7 @@ def restart_error(item): filetools.remove( filetools.join(DOWNLOAD_PATH, download_item.downloadFilename)) - update_json(item.path, - {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0}) + update_json(item.path, {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0}) platformtools.itemlist_refresh() @@ -260,91 +271,108 @@ def download_all_background(item): if download_item.downloadStatus in [STATUS_CODES.stoped, STATUS_CODES.canceled]: res = start_download(download_item) # platformtools.itemlist_refresh() - # Si se ha cancelado paramos + # If canceled, we stop if res == STATUS_CODES.canceled: break def menu(item): - logger.info() + log(item) if item.downloadServer: servidor = item.downloadServer.get("server", "Auto") else: servidor = "Auto" - # Opciones disponibles para el menu + # Options available for the menu op = [config.get_localized_string(70225), config.get_localized_string(70226), config.get_localized_string(70227), config.get_localized_string(30165) % 
(servidor.capitalize()), config.get_localized_string(60220), config.get_localized_string(60221)] opciones = [] - # Opciones para el menu + # Options for the menu if item.downloadStatus == STATUS_CODES.stoped: - opciones.append(op[0]) # Descargar - if not item.server: opciones.append(op[3]) # Elegir Servidor - opciones.append(op[1]) # Eliminar de la lista + opciones.append(op[0]) # Download + if not item.server: opciones.append(op[3]) # Choose Server + opciones.append(op[1]) # Remove from the list if item.downloadStatus == STATUS_CODES.canceled: - opciones.append(op[0]) # Descargar - if not item.server: opciones.append(op[3]) # Elegir Servidor - opciones.append(op[2]) # Reiniciar descarga - opciones.append(op[1]) # Eliminar de la lista + opciones.append(op[0]) # Download + if not item.server: opciones.append(op[3]) # Choose Server + opciones.append(op[2]) # Restart download + opciones.append(op[1]) # Remove from the list if item.downloadStatus == STATUS_CODES.completed: - opciones.append(op[5]) # Play - opciones.append(op[1]) # Eliminar de la lista - opciones.append(op[2]) # Reiniciar descarga + opciones.append(op[5]) # Play + opciones.append(op[1]) # Remove from the list + opciones.append(op[2]) # Restart download - if item.downloadStatus == STATUS_CODES.error: # descarga con error - opciones.append(op[2]) # Reiniciar descarga - opciones.append(op[1]) # Eliminar de la lista + if item.downloadStatus == STATUS_CODES.error: # Download with error + opciones.append(op[2]) # Restart download + opciones.append(op[1]) # Remove from the list if item.downloadStatus == STATUS_CODES.downloading: - opciones.append(op[5]) # Play - opciones.append(op[4]) # pause download - opciones.append(op[1]) # Eliminar de la lista + opciones.append(op[5]) # Play + opciones.append(op[4]) # Pause Download + opciones.append(op[1]) # Remove from the list - # Mostramos el dialogo + # Show Dialog seleccion = platformtools.dialog_select(config.get_localized_string(30163), opciones) + 
logger.info('SELECTION: '+ op[seleccion]) - # -1 es cancelar + # -1 is cancel if seleccion == -1: return - logger.info("option=%s" % (opciones[seleccion])) - # Opcion Eliminar + # Delete if opciones[seleccion] == op[1]: filetools.remove(item.path) - if platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(30300)): - filetools.remove(filetools.join(DOWNLOAD_PATH, item.downloadFilename)) + if item.TorrentName: + torrent.elementum_actions('delete', item.TorrentName) + else: + if platformtools.dialog_yesno(config.get_localized_string(20000), config.get_localized_string(30300)): + filetools.remove(filetools.join(DOWNLOAD_PATH, item.downloadFilename)) - # Opcion inicaiar descarga + # Start Download if opciones[seleccion] == op[0]: item.action = "start_download" xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + item.tourl() + ")") - # Elegir Servidor + # Select Server if opciones[seleccion] == op[3]: select_server(item) - # Reiniciar descarga + # Restart Download if opciones[seleccion] == op[2]: if filetools.isfile(filetools.join(DOWNLOAD_PATH, item.downloadFilename)): filetools.remove(filetools.join(DOWNLOAD_PATH, item.downloadFilename)) - update_json(item.path, {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0, - "downloadServer": {}}) + update_json(item.path, {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0, "downloadServer": {}}) if opciones[seleccion] == op[4]: + if item.TorrentName: + torrent.elementum_actions('pause', item.TorrentName) update_json(item.path, {"downloadStatus": STATUS_CODES.stoped}) if opciones[seleccion] == op[5]: - xbmc.executebuiltin('PlayMedia(' + filetools.join(DOWNLOAD_PATH, item.downloadFilename) + ',resume)') + path = filetools.join(DOWNLOAD_PATH, item.downloadFilename) + if filetools.isdir(path): + videos = [] + files = filetools.listdir(path) + for f in files: + if os.path.splitext(f)[-1] in extensions_list: + 
videos.append(f) + if len(videos) > 1: + selection = platformtools.dialog_select(config.get_localized_string(30034), files) + else: + selection = 0 + xbmc.executebuiltin('PlayMedia(' + filetools.join(path, files[selection]) + ',resume)') + else: + xbmc.executebuiltin('PlayMedia(' + path + ',resume)') if opciones[seleccion] != op[5]: platformtools.itemlist_refresh() def move_to_libray(item): - logger.info() + log() if item.contentType == 'movie': FOLDER = FOLDER_MOVIES @@ -360,6 +388,9 @@ def move_to_libray(item): library_path = filetools.join(move_path, *filetools.split(item.downloadFilename)) final_path = download_path + if not filetools.isdir(filetools.dirname(library_path)): + filetools.mkdir(filetools.dirname(library_path)) + if item.contentType == "movie" and item.infoLabels["tmdb_id"]: contentTitle = item.contentTitle if item.contentTitle else item.fulltitle library_item = Item(title= filetools.split(item.downloadFilename)[-1], channel="downloads", contentTitle = contentTitle, @@ -374,14 +405,11 @@ def move_to_libray(item): fulltitle = item.fulltitle, infoLabels={"tmdb_id": item.infoLabels["tmdb_id"]}) videolibrarytools.save_tvshow(tvshow, [library_item], silent=True) - if not filetools.isdir(filetools.dirname(library_path)): - filetools.mkdir(filetools.dirname(library_path)) - if filetools.isfile(library_path) and filetools.isfile(download_path): filetools.remove(library_path) if filetools.isfile(download_path): - if filetools.move(download_path, library_path): + if filetools.move(download_path, library_path, silent=True): final_path = library_path if len(filetools.listdir(filetools.dirname(download_path))) == 0: @@ -398,7 +426,7 @@ def move_to_libray(item): if filename.startswith(name) and (filename.endswith('.strm') or (filename.endswith('.json') and 'downloads' not in filename)): clean = True file_path = filetools.join(config.get_setting("videolibrarypath"), FOLDER, path_title, File) - logger.info('Delete File: ' + str(file_path)) + log('Delete File:', 
str(file_path)) filetools.remove(file_path) if file_path.endswith('.strm'): file_strm_path = file_path @@ -406,9 +434,8 @@ def move_to_libray(item): if config.is_xbmc() and config.get_setting("videolibrary_kodi"): from platformcode import xbmc_videolibrary if clean == True: - strm_list = [] - strm_list.append(file_strm_path) - xbmc_videolibrary.clean(strm_list) + path_list = [file_strm_path] + xbmc_videolibrary.clean(path_list) xbmc_videolibrary.update(FOLDER, path_title) @@ -446,9 +473,7 @@ def get_server_position(server): servers = {} if server in servers: - pos = [s for s in sorted(servers, key=lambda x: (old_div(sum(servers[x]["speeds"]), (len(servers[x]["speeds"]) or 1)), - float(sum(servers[x]["success"])) / ( - len(servers[x]["success"]) or 1)), reverse=True)] + pos = [s for s in sorted(servers, key=lambda x: (old_div(sum(servers[x]["speeds"]), (len(servers[x]["speeds"]) or 1)), float(sum(servers[x]["success"])) / ( len(servers[x]["success"]) or 1)), reverse=True)] return pos.index(server) + 1 else: return 0 @@ -456,27 +481,26 @@ def get_server_position(server): def get_match_list(data, match_list, order_list=None, only_ascii=False, ignorecase=False): """ - Busca coincidencias en una cadena de texto, con un diccionario de "ID" / "Listado de cadenas de busqueda": - { "ID1" : ["Cadena 1", "Cadena 2", "Cadena 3"], - "ID2" : ["Cadena 4", "Cadena 5", "Cadena 6"] - } + Search for matches in a text string, with a dictionary of "ID" / "List of search strings": +    {"ID1": ["String 1", "String 2", "String 3"], +      "ID2": ["String 4", "String 5", "String 6"] +    } - El diccionario no pude contener una misma cadena de busqueda en varías IDs. 
- - La busqueda se realiza por orden de tamaño de cadena de busqueda (de mas larga a mas corta) si una cadena coincide, - se elimina de la cadena a buscar para las siguientes, para que no se detecten dos categorias si una cadena es parte de otra: - por ejemplo: "Idioma Español" y "Español" si la primera aparece en la cadena "Pablo sabe hablar el Idioma Español" - coincidira con "Idioma Español" pero no con "Español" ya que la coincidencia mas larga tiene prioridad. +    The dictionary could not contain the same search string in several IDs. +    The search is performed in order of search string size (from longest to shortest) if a string matches, +    it is removed from the search string for the following, so that two categories are not detected if one string is part of another: +    for example: "Spanish Language" and "Spanish" if the first appears in the string "Pablo knows how to speak the Spanish Language" +    It will match "Spanish Language" but not "Spanish" since the longest match has priority. 
""" match_dict = dict() matches = [] - # Pasamos la cadena a unicode + # We pass the string to unicode if not PY3: data = unicode(data, "utf8") - # Pasamos el diccionario a {"Cadena 1": "ID1", "Cadena 2", "ID1", "Cadena 4", "ID2"} y los pasamos a unicode + # We pass the dictionary to {"String 1": "ID1", "String 2", "ID1", "String 4", "ID2"} and we pass them to unicode for key in match_list: if order_list and not key in order_list: raise Exception("key '%s' not in match_list" % key) @@ -488,17 +512,17 @@ def get_match_list(data, match_list, order_list=None, only_ascii=False, ignoreca else: match_dict[value] = key - # Si ignorecase = True, lo pasamos todo a mayusculas + # If ignorecase = True, we pass everything to capital letters if ignorecase: data = data.upper() match_dict = dict((key.upper(), match_dict[key]) for key in match_dict) - # Si ascii = True, eliminamos todos los accentos y Ñ + # If ascii = True, we remove all accents and Ñ if only_ascii: data = ''.join((c for c in unicodedata.normalize('NFD', data) if unicodedata.category(c) != 'Mn')) match_dict = dict((''.join((c for c in unicodedata.normalize('NFD', key) if unicodedata.category(c) != 'Mn')), match_dict[key]) for key in match_dict) - # Ordenamos el listado de mayor tamaño a menor y buscamos. + # We sort the list from largest to smallest and search. 
for match in sorted(match_dict, key=lambda x: len(x), reverse=True): s = data for a in matches: @@ -507,8 +531,7 @@ def get_match_list(data, match_list, order_list=None, only_ascii=False, ignoreca matches.append(match) if matches: if order_list: - return type("Mtch_list", (), - {"key": match_dict[matches[-1]], "index": order_list.index(match_dict[matches[-1]])}) + return type("Mtch_list", (), {"key": match_dict[matches[-1]], "index": order_list.index(match_dict[matches[-1]])}) else: return type("Mtch_list", (), {"key": match_dict[matches[-1]], "index": None}) else: @@ -563,22 +586,23 @@ def sort_method(item): def download_from_url(url, item): - logger.info("Attempting to download: %s" % (url)) - if url.lower().endswith(".m3u8") or url.lower().startswith("rtmp"): + log("Attempting to download:", url) + if url.lower().split('|')[0].endswith(".m3u8") or url.lower().startswith("rtmp"): save_server_statistics(item.server, 0, False) + platformtools.dialog_notification('m3u8 Download',config.get_localized_string(60364), sound=False) return {"downloadStatus": STATUS_CODES.error} - # Obtenemos la ruta de descarga y el nombre del archivo + # We get the download path and the file name item.downloadFilename = item.downloadFilename download_path = filetools.dirname(filetools.join(DOWNLOAD_PATH, item.downloadFilename)) file_name = filetools.basename(filetools.join(DOWNLOAD_PATH, item.downloadFilename)) - # Creamos la carpeta si no existe + # We create the folder if it does not exist if not filetools.exists(download_path): filetools.mkdir(download_path) - # Lanzamos la descarga + # We launch the download d = Downloader(url, download_path, file_name, max_connections=1 + int(config.get_setting("max_connections", "downloads")), block_size=2 ** (17 + int(config.get_setting("block_size", "downloads"))), @@ -593,20 +617,20 @@ def download_from_url(url, item): d.start_dialog(config.get_localized_string(60332)) - # Descarga detenida. 
Obtenemos el estado: - # Se ha producido un error en la descarga + # Download stopped. We get the state: + # Download failed if d.state == d.states.error: - logger.info("Error trying to download %s" % (url)) + log("Error trying to download", url) status = STATUS_CODES.error - # La descarga se ha detenifdo + # Download has stopped elif d.state == d.states.stopped: - logger.info("Stop download") + log("Stop download") status = STATUS_CODES.canceled - # La descarga ha finalizado + # Download is complete elif d.state == d.states.completed: - logger.info("Downloaded correctly") + log("Downloaded correctly") status = STATUS_CODES.completed if (item.downloadSize and item.downloadSize != d.size[0]) or d.size[0] < 5000000: # if size don't correspond or file is too little (gounlimited for example send a little video to say the server is overloaded) @@ -622,7 +646,7 @@ def download_from_url(url, item): def download_from_server(item): - logger.info(item.tostring()) + log(item.tostring()) unsupported_servers = ["torrent"] if item.contentChannel == 'local': @@ -652,34 +676,39 @@ def download_from_server(item): item.video_urls = itemlist if not item.server: item.server = "directo" else: - logger.info("There is nothing to reproduce") + log("There is nothing to reproduce") return {"downloadStatus": STATUS_CODES.error} finally: progreso.close() - logger.info("contentAction: %s | contentChannel: %s | server: %s | url: %s" % ( - item.contentAction, item.contentChannel, item.server, item.url)) + log("contentAction: %s | contentChannel: %s | server: %s | url: %s" % (item.contentAction, item.contentChannel, item.server, item.url)) + + if item.server == 'torrent': + import xbmcgui + xlistitem = xbmcgui.ListItem(path=item.url) + xlistitem.setArt({'icon': item.thumbnail, 'thumb': item.thumbnail, 'poster': item.thumbnail, 'fanart': item.thumbnail}) + platformtools.set_infolabels(xlistitem, item) + platformtools.play_torrent(item, xlistitem, item.url) if not item.server or not item.url or not 
item.contentAction == "play" or item.server in unsupported_servers: logger.error("The Item does not contain the necessary parameters.") return {"downloadStatus": STATUS_CODES.error} if not item.video_urls: - video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server, item.url, item.password, - True, True) + video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server, item.url, item.password, True, True) else: video_urls, puedes, motivo = item.video_urls, True, "" - # Si no esta disponible, salimos + # If it is not available, we go out if not puedes: - logger.info("The video is NOT available") + log("The video is NOT available") return {"downloadStatus": STATUS_CODES.error} else: - logger.info("YES Video is available") + log("YES Video is available") result = {} - # Recorre todas las opciones hasta que consiga descargar una correctamente + # Go through all the options until I can download one correctly for video_url in reversed(video_urls): result = download_from_url(video_url[1], item) @@ -687,23 +716,23 @@ def download_from_server(item): if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]: break - # Error en la descarga, continuamos con la siguiente opcion + # Download error, we continue with the next option if result["downloadStatus"] == STATUS_CODES.error: continue - # Devolvemos el estado + # We return the state return result def download_from_best_server(item): - logger.info("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) + log("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) result = {"downloadStatus": STATUS_CODES.error} progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70179)) try: if item.downloadItemlist: - logger.info('using cached servers') + log('using cached servers') play_items = [Item().fromurl(i) for i 
in item.downloadItemlist] else: if item.contentChannel in ['community', 'videolibrary']: @@ -730,7 +759,7 @@ def download_from_best_server(item): finally: progreso.close() - # Recorremos el listado de servers, hasta encontrar uno que funcione + # We go through the list of servers, until we find one that works for play_item in play_items: play_item = item.clone(**play_item.__dict__) play_item.contentAction = play_item.action @@ -741,7 +770,7 @@ def download_from_best_server(item): # if progreso.iscanceled(): # result["downloadStatus"] = STATUS_CODES.canceled - # Tanto si se cancela la descarga como si se completa dejamos de probar mas opciones + # Whether the download is canceled or completed, we stop trying more options if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]: result["downloadServer"] = {"url": play_item.url, "server": play_item.server} break @@ -752,12 +781,11 @@ def download_from_best_server(item): def select_server(item): if item.server: return "Auto" - logger.info( - "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) + log("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70179)) try: if item.downloadItemlist: - logger.info('using cached servers') + log('using cached servers') play_items = [Item().fromurl(i) for i in item.downloadItemlist] else: if item.contentChannel in ['community', 'videolibrary']: @@ -785,7 +813,7 @@ def select_server(item): # if there is only one server select it seleccion = 1 else: - # altrimenti mostra la finestra di selezione + # otherwise it shows the selection window seleccion = platformtools.dialog_select(config.get_localized_string(70192), ["Auto"] + [s.title for s in play_items]) if seleccion >= 1: @@ -799,16 +827,15 @@ def select_server(item): def start_download(item): - 
logger.info( - "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) - # Ya tenemnos server, solo falta descargar + log("contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) + # We already have a server, we just need to download if item.contentAction == "play": ret = download_from_server(item) elif item.downloadServer and item.downloadServer.get("server"): ret = download_from_server( item.clone(server=item.downloadServer.get("server"), url=item.downloadServer.get("url"), contentAction="play")) - # No tenemos server, necesitamos buscar el mejor + # We don't have a server, we need to find the best else: ret = download_from_best_server(item) @@ -820,29 +847,28 @@ def start_download(item): def get_episodes(item): - logger.info("contentAction: %s | contentChannel: %s | contentType: %s" % ( - item.contentAction, item.contentChannel, item.contentType)) + log("contentAction: %s | contentChannel: %s | contentType: %s" % (item.contentAction, item.contentChannel, item.contentType)) if 'dlseason' in item: season = True season_number = item.dlseason else: season = False - # El item que pretendemos descargar YA es un episodio + # The item we want to download NOW is an episode if item.contentType == "episode": episodes = [item.clone()] - # El item es uma serie o temporada + # The item is a series or season elif item.contentType in ["tvshow", "season"]: if item.downloadItemlist: episodes = [Item().fromurl(i) for i in item.downloadItemlist] else: - # importamos el canal + # The item is a series or season... 
if item.contentChannel in ['community', 'videolibrary']: channel = __import__('specials.%s' % item.contentChannel, None, None, ["specials.%s" % item.contentChannel]) else: channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) - # Obtenemos el listado de episodios + # We get the list of episodes episodes = getattr(channel, item.contentAction)(item) itemlist = [] @@ -855,38 +881,38 @@ def get_episodes(item): else: renumber(episodes, item) - # Tenemos las lista, ahora vamos a comprobar + # We get the list of episodes... for episode in episodes: - # Si partiamos de un item que ya era episodio estos datos ya están bien, no hay que modificarlos + # If we started from an item that was already an episode, this data is already good, it should not be modified if item.contentType != "episode": episode.contentAction = episode.action episode.contentChannel = episode.channel - # Si el resultado es una temporada, no nos vale, tenemos que descargar los episodios de cada temporada + # If the result is a season, it is not worth it, we have to download the episodes of each season if episode.contentType == "season": itemlist.extend(get_episodes(episode)) - # Si el resultado es un episodio ya es lo que necesitamos, lo preparamos para añadirlo a la descarga + # If the result is an episode is already what we need, we prepare it to add it to the download if episode.contentType == "episode": - # Pasamos el id al episodio + # We pass the id to the episode if not episode.infoLabels["tmdb_id"]: episode.infoLabels["tmdb_id"] = item.infoLabels["tmdb_id"] - # Episodio, Temporada y Titulo + # Episode, Season and Title if not episode.contentSeason or not episode.contentEpisodeNumber: season_and_episode = scrapertools.get_season_and_episode(episode.title) if season_and_episode: episode.contentSeason = season_and_episode.split("x")[0] episode.contentEpisodeNumber = season_and_episode.split("x")[1] - # Buscamos en tmdb + # Episode, Season and 
Title... if item.infoLabels["tmdb_id"]: scraper.find_and_set_infoLabels(episode) - # Episodio, Temporada y Titulo + # Episode, Season and Title if not episode.contentTitle: - episode.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)|\d*x\d*\s*-", "", episode.title).strip() + episode.contentTitle = re.sub(r"\[[^\]]+\]|\([^\)]+\)|\d*x\d*\s*-", "", episode.title).strip() episode.downloadFilename = filetools.validate_path(filetools.join(item.downloadFilename, "%dx%0.2d - %s" % (episode.contentSeason, episode.contentEpisodeNumber, episode.contentTitle.strip()))) if season: @@ -896,25 +922,27 @@ def get_episodes(item): itemlist.append(episode) - # Cualquier otro resultado no nos vale, lo ignoramos + # Any other result is not worth it, we ignore it else: - logger.info("Omitiendo item no válido: %s" % episode.tostring()) + log("Omitiendo item no válido:", episode.tostring()) - # if Multiple Languages or Qualities + # Any other result is not worth it, we ignore it... itemlist = videolibrarytools.filter_list(itemlist) return itemlist def write_json(item): - logger.info() + log() + channel = item.from_channel if item.from_channel else item.channel item.action = "menu" item.channel = "downloads" item.downloadStatus = STATUS_CODES.stoped item.downloadProgress = 0 item.downloadSize = 0 item.downloadCompleted = 0 + title = re.sub(r'(?:\[[^\]]+\]|%s[^-]+-\s*)' %config.get_localized_string(60356), '', item.title).strip() if not item.contentThumbnail: item.contentThumbnail = item.thumbnail @@ -922,7 +950,17 @@ def write_json(item): if name in item.__dict__: item.__dict__.pop(name) - path = filetools.join(DOWNLOAD_LIST_PATH, str(time.time()) + ".json") + if item.contentType == 'episode': + naming = title + typo(item.infoLabels['IMDBNumber'], '_ []') + typo(channel, '_ []') + else: + naming = item.fulltitle + typo(item.infoLabels['IMDBNumber'], '_ []') + typo(channel, '_ []') + naming += typo(item.contentLanguage, '_ []') if item.contentLanguage else '' + naming += typo(item.quality, '_ 
[]') if item.quality else '' + + path = filetools.join(DOWNLOAD_LIST_PATH, naming + ".json") + if filetools.isfile(path): + filetools.remove(path) + item.path = path filetools.write(path, item.tojson()) time.sleep(0.1) @@ -940,7 +978,7 @@ def save_download(item): def save_download_background(item): - logger.info() + log() # Menu contextual if item.from_action and item.from_channel: item.channel = item.from_channel @@ -989,7 +1027,7 @@ def save_download_background(item): def save_download_videolibrary(item): - logger.info() + log() show_disclaimer() item.contentChannel = 'videolibrary' item.channel = "downloads" @@ -998,8 +1036,7 @@ def save_download_videolibrary(item): def save_download_video(item): - logger.info("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( - item.contentAction, item.contentChannel, item.contentTitle)) + log("contentAction: %s | contentChannel: %s | contentTitle: %s" % (item.contentAction, item.contentChannel, item.contentTitle)) set_movie_title(item) @@ -1014,7 +1051,7 @@ def save_download_video(item): def save_download_movie(item): - logger.info("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( item.contentAction, item.contentChannel, item.contentTitle)) + log("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( item.contentAction, item.contentChannel, item.contentTitle)) progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70191)) @@ -1028,14 +1065,14 @@ def save_download_movie(item): progreso.update(0, config.get_localized_string(60062)) item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentTitle.strip(), item.infoLabels['IMDBNumber'])) + item.backupFilename = filetools.validate_path("%s [%s]" % (item.contentTitle.strip(), item.infoLabels['IMDBNumber'])) write_json(item) progreso.close() if not platformtools.dialog_yesno(config.get_localized_string(30101), config.get_localized_string(70189)): - 
platformtools.dialog_ok(config.get_localized_string(30101), item.contentTitle, - config.get_localized_string(30109)) + platformtools.dialog_ok(config.get_localized_string(30101), item.contentTitle, config.get_localized_string(30109)) else: play_item = select_server(item) if play_item == 'Auto': @@ -1048,8 +1085,7 @@ def save_download_movie(item): def save_download_tvshow(item): - logger.info("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % ( - item.contentAction, item.contentChannel, item.contentType, item.contentSerieName)) + log("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % (item.contentAction, item.contentChannel, item.contentType, item.contentSerieName)) progreso = platformtools.dialog_progress_bg(config.get_localized_string(30101), config.get_localized_string(70188)) try: @@ -1077,9 +1113,8 @@ def save_download_tvshow(item): progreso.close() if not platformtools.dialog_yesno(config.get_localized_string(30101), config.get_localized_string(70189)): - platformtools.dialog_ok(config.get_localized_string(30101), - str(len(episodes)) + config.get_localized_string(30110) + item.contentSerieName, - config.get_localized_string(30109)) + platformtools.dialog_ok(config.get_localized_string(30101), str(len(episodes)) + config.get_localized_string(30110) + item.contentSerieName, config.get_localized_string(30109)) + else: if len(episodes) == 1: play_item = select_server(episodes[0]) @@ -1101,10 +1136,10 @@ def save_download_tvshow(item): def set_movie_title(item): if not item.contentTitle: - item.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)", "", item.contentTitle).strip() + item.contentTitle = re.sub(r"\[[^\]]+\]|\([^\)]+\)", "", item.contentTitle).strip() if not item.contentTitle: - item.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)", "", item.title).strip() + item.contentTitle = re.sub(r"\[[^\]]+\]|\([^\)]+\)", "", item.title).strip() def show_disclaimer(): diff --git a/specials/filmontv.json 
b/specials/filmontv.json index fcf35bf5..39fb9c6b 100644 --- a/specials/filmontv.json +++ b/specials/filmontv.json @@ -3,7 +3,6 @@ "name": "Film in tv", "language": ["ita"], "active": false, - "adult": false, "thumbnail": null, "banner": null, "categories": [], diff --git a/specials/help.json b/specials/help.json index 5a49476b..8e51ddd1 100644 --- a/specials/help.json +++ b/specials/help.json @@ -2,6 +2,5 @@ "id": "help", "name": "Ayuda", "active": false, - "adult": false, "language": ["*"] } \ No newline at end of file diff --git a/specials/news.json b/specials/news.json index 63fb0be7..5c5e7f87 100644 --- a/specials/news.json +++ b/specials/news.json @@ -2,7 +2,6 @@ "id": "news", "name": "Novedades", "active": false, - "adult": false, "language": ["*"], "categories": [ "movie" diff --git a/specials/news.py b/specials/news.py index 63aad9dd..3d9a4ad1 100644 --- a/specials/news.py +++ b/specials/news.py @@ -119,7 +119,7 @@ def mainlist(item): set_category_context(new_item) itemlist.append(new_item) thumbnail = get_thumb("setting_0.png") - itemlist.append(Item(channel='shortcuts', action="SettingOnPosition", category=5, setting=0, + itemlist.append(Item(channel='shortcuts', action="SettingOnPosition", category=7, setting=1, title=typo(config.get_localized_string(70285), 'bold color kod'), thumbnail=thumbnail)) return itemlist @@ -155,10 +155,6 @@ def get_channels_list(): if not channel_parameters["active"]: continue - # No incluir si es un canal para adultos, y el modo adulto está desactivado - if channel_parameters["adult"] and config.get_setting("adult_mode") == 0: - continue - # No incluir si el canal es en un idioma filtrado if channel_language != "all" and channel_language not in str(channel_parameters["language"]) \ and "*" not in channel_parameters["language"]: @@ -630,10 +626,6 @@ def setting_channel(item): if not channel_parameters["active"]: continue - # No incluir si es un canal para adultos, y el modo adulto está desactivado - if 
channel_parameters["adult"] and config.get_setting("adult_mode") == 0: - continue - # No incluir si el canal es en un idioma filtrado if channel_language != "all" and channel_language not in str(channel_parameters["language"]) \ and "*" not in channel_parameters["language"]: diff --git a/specials/resolverdns.py b/specials/resolverdns.py index 56e5b126..f73ed6ae 100644 --- a/specials/resolverdns.py +++ b/specials/resolverdns.py @@ -10,6 +10,7 @@ from lib.requests_toolbelt.adapters import host_header_ssl from lib import doh from platformcode import logger, config import requests +from core import scrapertools try: import _sqlite3 as sql @@ -107,7 +108,10 @@ class CipherSuiteAdapter(host_header_ssl.HostHeaderSSLAdapter): domain = parse.netloc else: raise requests.exceptions.URLRequired - ip = self.getIp(domain) + if not scrapertools.find_single_match(domain, '\d+\.\d+\.\d+\.\d+'): + ip = self.getIp(domain) + else: + ip = None if ip: self.ssl_context = CustomContext(protocol, domain) if self.CF: diff --git a/specials/search.json b/specials/search.json index 0db387a0..947ebb08 100644 --- a/specials/search.json +++ b/specials/search.json @@ -2,7 +2,6 @@ "id": "search", "name": "search", "active": false, - "adult": false, "thumbnail": "", "banner": "", "categories": [], diff --git a/specials/search.py b/specials/search.py index 7e8eab43..4a22f3cf 100644 --- a/specials/search.py +++ b/specials/search.py @@ -46,7 +46,7 @@ def mainlist(item): Item(channel=item.channel, title=typo(config.get_localized_string(59994), 'color kod bold'), action='opciones', thumbnail=get_thumb('setting_0.png')), - Item(channel='shortcuts', title=typo(config.get_localized_string(70286), 'color kod bold'), action='SettingOnPosition', category=3, thumbnail=get_thumb('setting_0.png'))] + Item(channel='shortcuts', title=typo(config.get_localized_string(70286), 'color kod bold'), action='SettingOnPosition', category=5, setting=1, thumbnail=get_thumb('setting_0.png'))] itemlist = set_context(itemlist) 
@@ -298,34 +298,30 @@ def channel_search(item): def get_channel_results(ch, item): max_results = 10 results = list() + try: + ch_params = channeltools.get_channel_parameters(ch) - ch_params = channeltools.get_channel_parameters(ch) + module = __import__('channels.%s' % ch_params["channel"], fromlist=["channels.%s" % ch_params["channel"]]) + mainlist = getattr(module, 'mainlist')(Item(channel=ch_params["channel"])) + search_action = [elem for elem in mainlist if elem.action == "search" and (item.mode == 'all' or elem.contentType == item.mode)] - module = __import__('channels.%s' % ch_params["channel"], fromlist=["channels.%s" % ch_params["channel"]]) - mainlist = getattr(module, 'mainlist')(Item(channel=ch_params["channel"])) - search_action = [elem for elem in mainlist if elem.action == "search" and (item.mode == 'all' or elem.contentType == item.mode)] - - if search_action: - for search_ in search_action: - try: + if search_action: + for search_ in search_action: results.extend(module.search(search_, item.text)) - except: - pass - else: - try: + else: results.extend(module.search(item, item.text)) - except: - pass - if len(results) < 0 and len(results) < max_results and item.mode != 'all': + if len(results) < 0 and len(results) < max_results and item.mode != 'all': - if len(results) == 1: - if not results[0].action or config.get_localized_string(30992).lower() in results[0].title.lower(): - return [ch, []] + if len(results) == 1: + if not results[0].action or config.get_localized_string(30992).lower() in results[0].title.lower(): + return [ch, []] - results = get_info(results) + results = get_info(results) - return [ch, results] + return [ch, results] + except: + return [ch, results] def get_info(itemlist): @@ -418,8 +414,6 @@ def setting_channel_new(item): presel_values = ['skip', 'actual', 'all', 'none'] categs = ['movie', 'tvshow', 'documentary', 'anime', 'vos', 'direct', 'torrent'] - if config.get_setting('adult_mode') > 0: - categs.append('adult') for c in 
categs: preselecciones.append(config.get_localized_string(70577) + config.get_localized_category(c)) presel_values.append(c) diff --git a/specials/setting.py b/specials/setting.py index ed947e7f..27826152 100644 --- a/specials/setting.py +++ b/specials/setting.py @@ -107,142 +107,142 @@ def autostart(item): # item necessario launcher.py linea 265 # xbmcgui.Dialog().ok(config.get_localized_string(20000), config.get_localized_string(70710)) -def setting_torrent(item): - logger.info() +# def setting_torrent(item): +# logger.info() - LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default="") - LIBTORRENT_ERROR = config.get_setting("libtorrent_error", server="torrent", default="") - default = config.get_setting("torrent_client", server="torrent", default=0) - BUFFER = config.get_setting("mct_buffer", server="torrent", default="50") - DOWNLOAD_PATH = config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")) - if not DOWNLOAD_PATH: DOWNLOAD_PATH = filetools.join(config.get_data_path(), 'downloads') - BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) - RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) - DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="") - BUFFER_BT = config.get_setting("bt_buffer", server="torrent", default="50") - DOWNLOAD_PATH_BT = config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")) - if not DOWNLOAD_PATH_BT: DOWNLOAD_PATH_BT = filetools.join(config.get_data_path(), 'downloads') - MAGNET2TORRENT = config.get_setting("magnet2torrent", server="torrent", default=False) +# LIBTORRENT_PATH = config.get_setting("libtorrent_path", server="torrent", default="") +# LIBTORRENT_ERROR = config.get_setting("libtorrent_error", server="torrent", default="") +# default = config.get_setting("torrent_client", server="torrent", default=0) +# BUFFER = 
config.get_setting("mct_buffer", server="torrent", default="50") +# DOWNLOAD_PATH = config.get_setting("mct_download_path", server="torrent", default=config.get_setting("downloadpath")) +# if not DOWNLOAD_PATH: DOWNLOAD_PATH = filetools.join(config.get_data_path(), 'downloads') +# BACKGROUND = config.get_setting("mct_background_download", server="torrent", default=True) +# RAR = config.get_setting("mct_rar_unpack", server="torrent", default=True) +# DOWNLOAD_LIMIT = config.get_setting("mct_download_limit", server="torrent", default="") +# BUFFER_BT = config.get_setting("bt_buffer", server="torrent", default="50") +# DOWNLOAD_PATH_BT = config.get_setting("bt_download_path", server="torrent", default=config.get_setting("downloadpath")) +# if not DOWNLOAD_PATH_BT: DOWNLOAD_PATH_BT = filetools.join(config.get_data_path(), 'downloads') +# MAGNET2TORRENT = config.get_setting("magnet2torrent", server="torrent", default=False) - torrent_options = [config.get_localized_string(30006), config.get_localized_string(70254), config.get_localized_string(70255)] - torrent_options.extend(platformtools.torrent_client_installed()) +# torrent_options = [config.get_localized_string(30006), config.get_localized_string(70254), config.get_localized_string(70255)] +# torrent_options.extend(platformtools.torrent_client_installed()) - list_controls = [ - { - "id": "libtorrent_path", - "type": "text", - "label": "Libtorrent path", - "default": LIBTORRENT_PATH, - "enabled": True, - "visible": False - }, - { - "id": "libtorrent_error", - "type": "text", - "label": "libtorrent error", - "default": LIBTORRENT_ERROR, - "enabled": True, - "visible": False - }, - { - "id": "list_torrent", - "type": "list", - "label": config.get_localized_string(70256), - "default": default, - "enabled": True, - "visible": True, - "lvalues": torrent_options - }, - { - "id": "mct_buffer", - "type": "text", - "label": "MCT - " + config.get_localized_string(70758), - "default": BUFFER, - "enabled": True, - "visible": 
"eq(-1,%s)" % torrent_options[2] - }, - { - "id": "mct_download_path", - "type": "text", - "label": "MCT - " + config.get_localized_string(30017), - "default": DOWNLOAD_PATH, - "enabled": True, - "visible": "eq(-2,%s)" % torrent_options[2] - }, - { - "id": "bt_buffer", - "type": "text", - "label": "BT - " + config.get_localized_string(70758), - "default": BUFFER_BT, - "enabled": True, - "visible": "eq(-3,%s)" % torrent_options[1] - }, - { - "id": "bt_download_path", - "type": "text", - "label": "BT - " + config.get_localized_string(30017), - "default": DOWNLOAD_PATH_BT, - "enabled": True, - "visible": "eq(-4,%s)" % torrent_options[1] - }, - { - "id": "mct_download_limit", - "type": "text", - "label": config.get_localized_string(70759), - "default": DOWNLOAD_LIMIT, - "enabled": True, - "visible": "eq(-5,%s) | eq(-5,%s)" % (torrent_options[1], torrent_options[2]) - }, - { - "id": "mct_rar_unpack", - "type": "bool", - "label": config.get_localized_string(70760), - "default": RAR, - "enabled": True, - "visible": True - }, - { - "id": "mct_background_download", - "type": "bool", - "label": config.get_localized_string(70761), - "default": BACKGROUND, - "enabled": True, - "visible": True - }, - { - "id": "magnet2torrent", - "type": "bool", - "label": config.get_localized_string(70762), - "default": MAGNET2TORRENT, - "enabled": True, - "visible": True - } - ] +# list_controls = [ +# { +# "id": "libtorrent_path", +# "type": "text", +# "label": "Libtorrent path", +# "default": LIBTORRENT_PATH, +# "enabled": True, +# "visible": False +# }, +# { +# "id": "libtorrent_error", +# "type": "text", +# "label": "libtorrent error", +# "default": LIBTORRENT_ERROR, +# "enabled": True, +# "visible": False +# }, +# { +# "id": "list_torrent", +# "type": "list", +# "label": config.get_localized_string(70256), +# "default": default, +# "enabled": True, +# "visible": True, +# "lvalues": torrent_options +# }, +# { +# "id": "mct_buffer", +# "type": "text", +# "label": "MCT - " + 
config.get_localized_string(70758), +# "default": BUFFER, +# "enabled": True, +# "visible": "eq(-1,%s)" % torrent_options[2] +# }, +# { +# "id": "mct_download_path", +# "type": "text", +# "label": "MCT - " + config.get_localized_string(30017), +# "default": DOWNLOAD_PATH, +# "enabled": True, +# "visible": "eq(-2,%s)" % torrent_options[2] +# }, +# { +# "id": "bt_buffer", +# "type": "text", +# "label": "BT - " + config.get_localized_string(70758), +# "default": BUFFER_BT, +# "enabled": True, +# "visible": "eq(-3,%s)" % torrent_options[1] +# }, +# { +# "id": "bt_download_path", +# "type": "text", +# "label": "BT - " + config.get_localized_string(30017), +# "default": DOWNLOAD_PATH_BT, +# "enabled": True, +# "visible": "eq(-4,%s)" % torrent_options[1] +# }, +# { +# "id": "mct_download_limit", +# "type": "text", +# "label": config.get_localized_string(70759), +# "default": DOWNLOAD_LIMIT, +# "enabled": True, +# "visible": "eq(-5,%s) | eq(-5,%s)" % (torrent_options[1], torrent_options[2]) +# }, +# { +# "id": "mct_rar_unpack", +# "type": "bool", +# "label": config.get_localized_string(70760), +# "default": RAR, +# "enabled": True, +# "visible": True +# }, +# { +# "id": "mct_background_download", +# "type": "bool", +# "label": config.get_localized_string(70761), +# "default": BACKGROUND, +# "enabled": True, +# "visible": True +# }, +# { +# "id": "magnet2torrent", +# "type": "bool", +# "label": config.get_localized_string(70762), +# "default": MAGNET2TORRENT, +# "enabled": True, +# "visible": True +# } +# ] - platformtools.show_channel_settings(list_controls=list_controls, callback='save_setting_torrent', item=item, - caption=config.get_localized_string(70257), custom_button={'visible': False}) +# platformtools.show_channel_settings(list_controls=list_controls, callback='save_setting_torrent', item=item, +# caption=config.get_localized_string(70257), custom_button={'visible': False}) -def save_setting_torrent(item, dict_data_saved): - if dict_data_saved and "list_torrent" 
in dict_data_saved: - config.set_setting("torrent_client", dict_data_saved["list_torrent"], server="torrent") - if dict_data_saved and "mct_buffer" in dict_data_saved: - config.set_setting("mct_buffer", dict_data_saved["mct_buffer"], server="torrent") - if dict_data_saved and "mct_download_path" in dict_data_saved: - config.set_setting("mct_download_path", dict_data_saved["mct_download_path"], server="torrent") - if dict_data_saved and "mct_background_download" in dict_data_saved: - config.set_setting("mct_background_download", dict_data_saved["mct_background_download"], server="torrent") - if dict_data_saved and "mct_rar_unpack" in dict_data_saved: - config.set_setting("mct_rar_unpack", dict_data_saved["mct_rar_unpack"], server="torrent") - if dict_data_saved and "mct_download_limit" in dict_data_saved: - config.set_setting("mct_download_limit", dict_data_saved["mct_download_limit"], server="torrent") - if dict_data_saved and "bt_buffer" in dict_data_saved: - config.set_setting("bt_buffer", dict_data_saved["bt_buffer"], server="torrent") - if dict_data_saved and "bt_download_path" in dict_data_saved: - config.set_setting("bt_download_path", dict_data_saved["bt_download_path"], server="torrent") - if dict_data_saved and "magnet2torrent" in dict_data_saved: - config.set_setting("magnet2torrent", dict_data_saved["magnet2torrent"], server="torrent") +# def save_setting_torrent(item, dict_data_saved): +# if dict_data_saved and "list_torrent" in dict_data_saved: +# config.set_setting("torrent_client", dict_data_saved["list_torrent"], server="torrent") +# if dict_data_saved and "mct_buffer" in dict_data_saved: +# config.set_setting("mct_buffer", dict_data_saved["mct_buffer"], server="torrent") +# if dict_data_saved and "mct_download_path" in dict_data_saved: +# config.set_setting("mct_download_path", dict_data_saved["mct_download_path"], server="torrent") +# if dict_data_saved and "mct_background_download" in dict_data_saved: +# 
config.set_setting("mct_background_download", dict_data_saved["mct_background_download"], server="torrent") +# if dict_data_saved and "mct_rar_unpack" in dict_data_saved: +# config.set_setting("mct_rar_unpack", dict_data_saved["mct_rar_unpack"], server="torrent") +# if dict_data_saved and "mct_download_limit" in dict_data_saved: +# config.set_setting("mct_download_limit", dict_data_saved["mct_download_limit"], server="torrent") +# if dict_data_saved and "bt_buffer" in dict_data_saved: +# config.set_setting("bt_buffer", dict_data_saved["bt_buffer"], server="torrent") +# if dict_data_saved and "bt_download_path" in dict_data_saved: +# config.set_setting("bt_download_path", dict_data_saved["bt_download_path"], server="torrent") +# if dict_data_saved and "magnet2torrent" in dict_data_saved: +# config.set_setting("magnet2torrent", dict_data_saved["magnet2torrent"], server="torrent") def menu_servers(item): logger.info() @@ -468,16 +468,16 @@ def check_quickfixes(item): return False -def update_quasar(item): - logger.info() +# def update_quasar(item): +# logger.info() - from platformcode import custom_code, platformtools - stat = False - stat = custom_code.update_external_addon("quasar") - if stat: - platformtools.dialog_notification("Actualización Quasar", "Realizada con éxito") - else: - platformtools.dialog_notification("Actualización Quasar", "Ha fallado. Consulte el log") +# from platformcode import custom_code, platformtools +# stat = False +# stat = custom_code.update_external_addon("quasar") +# if stat: +# platformtools.dialog_notification("Actualización Quasar", "Realizada con éxito") +# else: +# platformtools.dialog_notification("Actualización Quasar", "Ha fallado. 
Consulte el log") def conf_tools(item): diff --git a/specials/shortcuts.py b/specials/shortcuts.py index b43b6478..32a552d0 100644 --- a/specials/shortcuts.py +++ b/specials/shortcuts.py @@ -8,12 +8,12 @@ def context(): # original # if config.get_setting('quick_menu'): context.append((config.get_localized_string(60360).upper(), "XBMC.RunPlugin(plugin://plugin.video.kod/?%s)" % Item(channel='shortcuts', action="shortcut_menu").tourl())) # if config.get_setting('side_menu'): context.append((config.get_localized_string(70737).upper(), "XBMC.RunPlugin(plugin://plugin.video.kod/?%s)" % Item(channel='shortcuts',action="side_menu").tourl())) - # if config.get_setting('kod_menu'): context.append((config.get_localized_string(30025), "XBMC.RunPlugin(plugin://plugin.video.kod/?%s)" % Item(channel='shortcuts', action="settings_menu").tourl())) + # if config.get_setting('kod_menu'): context.append((config.get_localized_string(60026), "XBMC.RunPlugin(plugin://plugin.video.kod/?%s)" % Item(channel='shortcuts', action="settings_menu").tourl())) # pre-serialised - if config.get_setting('quick_menu'): context.append((config.get_localized_string(60360).upper(), 'XBMC.RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJzaG9ydGN1dF9tZW51IiwgCiAgICAiY2hhbm5lbCI6ICJzaG9ydGN1dHMiLCAKICAgICJpbmZvTGFiZWxzIjoge30KfQ%3D%3D)')) - if config.get_setting('side_menu'): context.append((config.get_localized_string(70737).upper(), 'XBMC.RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJzaWRlX21lbnUiLCAKICAgICJjaGFubmVsIjogInNob3J0Y3V0cyIsIAogICAgImluZm9MYWJlbHMiOiB7fQp9)')) - if config.get_setting('kod_menu'): context.append((config.get_localized_string(30025), 'XBMC.RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJzZXR0aW5nc19tZW51IiwgCiAgICAiY2hhbm5lbCI6ICJzaG9ydGN1dHMiLCAKICAgICJpbmZvTGFiZWxzIjoge30KfQ%3D%3D)')) + if config.get_setting('quick_menu'): context.append((config.get_localized_string(60360), 
'XBMC.RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJzaG9ydGN1dF9tZW51IiwgCiAgICAiY2hhbm5lbCI6ICJzaG9ydGN1dHMiLCAKICAgICJpbmZvTGFiZWxzIjoge30KfQ%3D%3D)')) + if config.get_setting('side_menu'): context.append((config.get_localized_string(70737), 'XBMC.RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJzaWRlX21lbnUiLCAKICAgICJjaGFubmVsIjogInNob3J0Y3V0cyIsIAogICAgImluZm9MYWJlbHMiOiB7fQp9)')) + if config.get_setting('kod_menu'): context.append((config.get_localized_string(60026), 'XBMC.RunPlugin(plugin://plugin.video.kod/?ewogICAgImFjdGlvbiI6ICJzZXR0aW5nc19tZW51IiwgCiAgICAiY2hhbm5lbCI6ICJzaG9ydGN1dHMiLCAKICAgICJpbmZvTGFiZWxzIjoge30KfQ%3D%3D)')) return context @@ -29,36 +29,6 @@ def settings_menu(item): from platformcode import config config.open_settings() -def view_mode(item): - logger.info(str(item)) - import xbmc - from core import filetools, jsontools - from core.support import typo - from platformcode import config, platformtools - - skin_name = xbmc.getSkinDir() - config.set_setting('skin_name', skin_name) - - path = filetools.join(config.get_runtime_path(), 'resources', 'views', skin_name + '.json') - if filetools.isfile(path): - json_file = open(path, "r").read() - json = jsontools.load(json_file) - - Type = 'addon'if item.type in ['channel', 'server'] else item.type - skin = json[Type] - - list_type = [] - for key in skin: - list_type.append(key) - list_type.sort() - list_type.insert(0, config.get_localized_string(70003)) - - select = platformtools.dialog_select(config.get_localized_string(70754), list_type) - value = list_type[select] + ' , ' + str(skin[list_type[select]] if list_type[select] in skin else 0) - config.set_setting('view_mode_%s' % item.type, value) - else: - platformtools.dialog_ok(config.get_localized_string(30141), config.get_localized_string(30142) % typo(skin_name.replace('skin.','').replace('.',' '), 'capitalize bold')) - def servers_menu(item): # from core.support import dbg; dbg() from core import servertools @@ 
-177,5 +147,5 @@ def select(item): else: values = item.values.split('|') - select = platformtools.dialog_select(label, values) + select = platformtools.dialog_select(label, values, config.get_setting(item.id)) config.set_setting(item.id, values[select]) \ No newline at end of file diff --git a/specials/trailertools.py b/specials/trailertools.py index 683ae7ca..7db64695 100644 --- a/specials/trailertools.py +++ b/specials/trailertools.py @@ -64,7 +64,7 @@ def buscartrailer(item, trailers=[]): itemlist = [] if item.search_title: - item.contentTitle = item.search_title + item.contentTitle = urllib.unquote_plus(item.search_title) elif item.contentTitle != "": item.contentTitle = item.contentTitle.strip() elif keyboard: diff --git a/specials/tvmoviedb.json b/specials/tvmoviedb.json index 7a97a93c..250e4adc 100644 --- a/specials/tvmoviedb.json +++ b/specials/tvmoviedb.json @@ -2,7 +2,6 @@ "id": "tvmoviedb", "name": "TvMovieDB", "active": false, - "adult": false, "language": ["*"], "thumbnail": "http://i.imgur.com/HA7fvgD.png", "categories": [ diff --git a/specials/url.json b/specials/url.json index 16b4a1b4..693984be 100644 --- a/specials/url.json +++ b/specials/url.json @@ -2,7 +2,6 @@ "id": "url", "name": "URL", "active": false, - "adult": false, "thumbnail": "url.png", "banner": "url.png" } \ No newline at end of file diff --git a/specials/videolibrary.json b/specials/videolibrary.json index 89e04bc2..20d3056f 100644 --- a/specials/videolibrary.json +++ b/specials/videolibrary.json @@ -2,9 +2,78 @@ "id": "videolibrary", "name": "Videoteca", "active": false, - "adult": false, "language": ["*"], "settings": [ + { + "id": "update", + "type": "list", + "label": "@60601", + "default": 1, + "visible": true, + "lvalues": [ + "@60602", + "@60603", + "@60604", + "@60605" + ] + }, + { + "id": "update_wait", + "type": "list", + "label": "@60606", + "default": 0, + "enabled": "eq(-1,@60603)|eq(-1,@60605)", + "lvalues": [ + "No", + "@60609", + "@60610", + "@60611", + "@60612" + ] 
+ }, + { + "id": "everyday_delay", + "type": "list", + "label": "@60613", + "default": 1, + "enabled": "eq(-2,@60604)|eq(-2,@60605)", + "lvalues": [ + "00:00", + "04:00", + "08:00", + "12:00", + "16:00", + "20:00" + ] + }, + { + "id": "updatetvshows_interval", + "type": "list", + "label": "@60614", + "default": 0, + "enabled": "!eq(-3,@60615)", + "lvalues": [ + "@60616", + "@60617" + ] + }, + { + "id": "search_new_content", + "type": "list", + "label": "@60618", + "default": 0, + "enabled": "!eq(-4,@60615)", + "lvalues": [ + "@60619", + "@60620" + ] + }, + { + "id": "local_episodes", + "type": "bool", + "label": "@80042", + "default": false + }, { "id": "lab_1", "type": "label", @@ -56,73 +125,6 @@ "pt" ] }, - { - "id": "update", - "type": "list", - "label": "@60601", - "default": 1, - "visible": true, - "lvalues": [ - "@60602", - "@60603", - "@60604", - "@60605" - ] - }, - { - "id": "update_wait", - "type": "list", - "label": "@60606", - "default": 0, - "visible": true, - "enabled": "eq(-1,@60603)|eq(-1,@60605)", - "lvalues": [ - "No", - "@60609", - "@60610", - "@60611", - "@60612" - ] - }, - { - "id": "everyday_delay", - "type": "list", - "label": "@60613", - "default": 1, - "visible": true, - "enabled": "eq(-2,@60604)|eq(-2,@60605)", - "lvalues": [ - "00:00", - "04:00", - "08:00", - "12:00", - "16:00", - "20:00" - ] - }, - { - "id": "updatetvshows_interval", - "type": "list", - "label": "@60614", - "default": 0, - "visible": true, - "enabled": "!eq(-3,@60615)", - "lvalues": [ - "@60616", - "@60617" - ] - }, - { - "id": "search_new_content", - "type": "list", - "label": "@60618", - "default": 0, - "enabled": "!eq(-4,@60615)", - "lvalues": [ - "@60619", - "@60620" - ] - }, { "id": "window_type", "type": "list", diff --git a/specials/videolibrary.py b/specials/videolibrary.py index dfb16983..2e56cf85 100644 --- a/specials/videolibrary.py +++ b/specials/videolibrary.py @@ -32,7 +32,7 @@ def mainlist(item): category=config.get_localized_string(70271), 
thumbnail=get_thumb("videolibrary_tvshow.png"))) itemlist.append(Item(channel='shortcuts', action="SettingOnPosition", - category=2, setting=0, title=typo(config.get_localized_string(70287),'bold color kod'), + category=2, setting=1, title=typo(config.get_localized_string(70287),'bold color kod'), thumbnail = get_thumb("setting_0.png"))) return itemlist @@ -306,6 +306,14 @@ def list_tvshows(item): {"title": config.get_localized_string(70269), "action": "update_tvshow", "channel": "videolibrary"}] + if item_tvshow.local_episodes_path == "": + item_tvshow.context.append({"title": config.get_localized_string(80048), + "action": "add_local_episodes", + "channel": "videolibrary"}) + else: + item_tvshow.context.append({"title": config.get_localized_string(80049), + "action": "remove_local_episodes", + "channel": "videolibrary"}) # ,{"title": "Cambiar contenido (PENDIENTE)", # "action": "", # "channel": "videolibrary"}] @@ -322,7 +330,7 @@ def list_tvshows(item): itemlist = sorted(itemlist, key=lambda it: it.title.lower()) itemlist.append(Item(channel=item.channel, action="update_videolibrary", thumbnail=item.thumbnail, - title=config.get_localized_string(60026), folder=False)) + title=typo(config.get_localized_string(70269), 'bold color kod'), folder=False)) return itemlist @@ -468,7 +476,7 @@ def findvideos(item): item_local = None # Desactiva autoplay - autoplay.set_status(False) + # autoplay.set_status(False) if not item.contentTitle or not item.strm_path: logger.debug("Unable to search for videos due to lack of parameters") @@ -587,7 +595,7 @@ def findvideos(item): list_servers = servertools.filter_servers(list_servers) elif item_json.action == 'play': from platformcode import platformtools - autoplay.set_status(True) + # autoplay.set_status(True) item_json.contentChannel = item_json.channel item_json.channel = "videolibrary" platformtools.play_video(item_json) @@ -629,7 +637,8 @@ def findvideos(item): itemlist.append(server) # return sorted(itemlist, key=lambda it: 
it.title.lower()) - autoplay.play_multi_channel(item, itemlist) + if autoplay.play_multi_channel(item, itemlist): # hideserver + return [] from inspect import stack from specials import nextep if nextep.check(item) and stack()[1][3] == 'run': @@ -678,7 +687,7 @@ def play(item): return itemlist -def update_videolibrary(item): +def update_videolibrary(item=''): logger.info() # Actualizar las series activas sobreescribiendo @@ -767,17 +776,15 @@ def delete_videolibrary(item): p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(80038)) p_dialog.update(0) - filetools.rmdirtree(videolibrarytools.MOVIES_PATH) - p_dialog.update(40) - filetools.rmdirtree(videolibrarytools.TVSHOWS_PATH) - p_dialog.update(80) if config.is_xbmc() and config.get_setting("videolibrary_kodi"): from platformcode import xbmc_videolibrary - strm_list = [] - strm_list.append(config.get_setting('videolibrarypath')) - xbmc_videolibrary.clean(strm_list) - + xbmc_videolibrary.clean() + p_dialog.update(10) + filetools.rmdirtree(videolibrarytools.MOVIES_PATH) + p_dialog.update(50) + filetools.rmdirtree(videolibrarytools.TVSHOWS_PATH) p_dialog.update(90) + config.verify_directories_created() p_dialog.update(100) xbmc.sleep(1000) @@ -795,12 +802,59 @@ def update_tvshow(item): p_dialog.update(0, heading, item.contentSerieName) import service - if service.update(item.path, p_dialog, 1, 1, item, False) and config.is_xbmc() and config.get_setting("videolibrary_kodi"): + if service.update(item.path, p_dialog, 0, 100, item, False) and config.is_xbmc() and config.get_setting("videolibrary_kodi"): from platformcode import xbmc_videolibrary xbmc_videolibrary.update(folder=filetools.basename(item.path)) p_dialog.close() + # check if the TV show is ended or has been canceled and ask the user to remove it from the video library update + nfo_path = filetools.join(item.path, "tvshow.nfo") + head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path) + if item.active 
and not item_nfo.active: + if not platformtools.dialog_yesno(config.get_localized_string(60037).replace('...',''), config.get_localized_string(70268) % item.contentSerieName): + item_nfo.active = 1 + filetools.write(nfo_path, head_nfo + item_nfo.tojson()) + + platformtools.itemlist_refresh() + + +def add_local_episodes(item): + logger.info() + + done, local_episodes_path = videolibrarytools.config_local_episodes_path(item.path, item.contentSerieName, silent=True) + if done < 0: + logger.info("An issue has occurred while configuring local episodes") + elif local_episodes_path: + nfo_path = filetools.join(item.path, "tvshow.nfo") + head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path) + item_nfo.local_episodes_path = local_episodes_path + if not item_nfo.active: + item_nfo.active = 1 + filetools.write(nfo_path, head_nfo + item_nfo.tojson()) + + update_tvshow(item) + + platformtools.itemlist_refresh() + + +def remove_local_episodes(item): + logger.info() + + nfo_path = filetools.join(item.path, "tvshow.nfo") + head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path) + + for season_episode in item_nfo.local_episodes_list: + filetools.remove(filetools.join(item.path, season_episode + '.strm')) + + item_nfo.local_episodes_list = [] + item_nfo.local_episodes_path = '' + filetools.write(nfo_path, head_nfo + item_nfo.tojson()) + + update_tvshow(item) + + platformtools.itemlist_refresh() + def verify_playcount_series(item, path): logger.info() @@ -1024,22 +1078,24 @@ def delete(item): for file in filetools.listdir(_item.path): if file.endswith(".strm") or file.endswith(".nfo") or file.endswith(".json")or file.endswith(".torrent"): filetools.remove(filetools.join(_item.path, file)) - raiz, carpeta_serie, ficheros = next(filetools.walk(_item.path)) - if ficheros == []: - filetools.rmdir(_item.path) + + if _item.contentType == 'movie': + heading = config.get_localized_string(70084) else: - if _item.contentType == 'movie': - heading = config.get_localized_string(70084) - 
else: - heading = config.get_localized_string(70085) - if platformtools.dialog_yesno(heading, config.get_localized_string(70081)): - filetools.rmdirtree(_item.path) + heading = config.get_localized_string(70085) if config.is_xbmc() and config.get_setting("videolibrary_kodi"): from platformcode import xbmc_videolibrary - strm_list = [] - strm_list.append(_item.extra) - xbmc_videolibrary.clean(strm_list) + if _item.local_episodes_path: + platformtools.dialog_ok(heading, config.get_localized_string(80047) % _item.infoLabels['title']) + path_list = [_item.extra] + xbmc_videolibrary.clean(path_list) + + raiz, carpeta_serie, ficheros = next(filetools.walk(_item.path)) + if ficheros == []: + filetools.rmdir(_item.path) + elif platformtools.dialog_yesno(heading, config.get_localized_string(70081) % os.path.basename(_item.path)): + filetools.rmdirtree(_item.path) logger.info("All links removed") xbmc.sleep(1000) @@ -1067,8 +1123,8 @@ def delete(item): if index == 0: # Seleccionado Eliminar pelicula/serie - canal = None delete_all(item) + return elif index > 0: # Seleccionado Eliminar canal X @@ -1079,41 +1135,43 @@ def delete(item): else: canal = item.dead - if canal: - num_enlaces = 0 - strm_list = [] - for fd in filetools.listdir(item.path): - if fd.endswith(canal + '].json') or scrapertools.find_single_match(fd, '%s]_\d+.torrent' % canal): - if filetools.remove(filetools.join(item.path, fd)): - num_enlaces += 1 - # Remove strm and nfo if no other channel - episode = fd.replace(' [' + canal + '].json', '') - found_ch = False - for ch in channels: - if filetools.exists(filetools.join(item.path, episode + ' [' + ch + '].json')): - found_ch = True - break - if found_ch == False: - filetools.remove(filetools.join(item.path, episode + '.nfo')) - filetools.remove(filetools.join(item.path, episode + '.strm')) - strm_list.append(filetools.join(item.extra, episode + '.strm')) + num_enlaces = 0 + path_list = [] + for fd in filetools.listdir(item.path): + if fd.endswith(canal + 
'].json') or scrapertools.find_single_match(fd, '%s]_\d+.torrent' % canal): + if filetools.remove(filetools.join(item.path, fd)): + num_enlaces += 1 + # Remove strm and nfo if no other channel + episode = fd.replace(' [' + canal + '].json', '') + found_ch = False + for ch in channels: + if filetools.exists(filetools.join(item.path, episode + ' [' + ch + '].json')): + found_ch = True + break + if found_ch == False: + filetools.remove(filetools.join(item.path, episode + '.nfo')) + strm_path = filetools.join(item.path, episode + '.strm') + # if it is a local episode, do not delete the strm + if 'plugin://plugin.video.kod/?' in filetools.read(strm_path): + filetools.remove(strm_path) + path_list.append(filetools.join(item.extra, episode + '.strm')) - if config.is_xbmc() and config.get_setting("videolibrary_kodi") and strm_list: - from platformcode import xbmc_videolibrary - xbmc_videolibrary.clean(strm_list) + if config.is_xbmc() and config.get_setting("videolibrary_kodi") and path_list: + from platformcode import xbmc_videolibrary + xbmc_videolibrary.clean(path_list) - if num_enlaces > 0: - # Actualizar .nfo - head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) - del item_nfo.library_urls[canal] - if item_nfo.emergency_urls and item_nfo.emergency_urls.get(canal, False): - del item_nfo.emergency_urls[canal] - filetools.write(item.nfo, head_nfo + item_nfo.tojson()) + if num_enlaces > 0: + # Actualizar .nfo + head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) + del item_nfo.library_urls[canal] + if item_nfo.emergency_urls and item_nfo.emergency_urls.get(canal, False): + del item_nfo.emergency_urls[canal] + filetools.write(item.nfo, head_nfo + item_nfo.tojson()) - msg_txt = config.get_localized_string(70087) % (num_enlaces, canal) - logger.info(msg_txt) - platformtools.dialog_notification(heading, msg_txt) - platformtools.itemlist_refresh() + msg_txt = config.get_localized_string(70087) % (num_enlaces, canal) + logger.info(msg_txt) + 
platformtools.dialog_notification(heading, msg_txt) + platformtools.itemlist_refresh() else: if platformtools.dialog_yesno(heading, config.get_localized_string(70088) % item.infoLabels['title']):