KoD 0.7
- new DNS override method
- added an option to hide servers when autoplay is in use
- code improvements and various fixes
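The change that repeats across the channel modules in the diff below is the move from scrapertoolsV2 to scrapertools and from config.get_channel_url(__channel__) to either config.get_channel_url() or config.get_channel_url(findhost), where findhost() scrapes a channel's "new address" landing page for its current mirror. The following is a minimal illustrative sketch of that channel-side pattern, assuming the helpers behave as the diff suggests; the landing-page URL and regex are placeholders, not taken from any real channel, and this code is not part of the commit itself.

# Illustrative sketch only, not part of this commit.
from core import support
from platformcode import config

def findhost():
    # Hypothetical mirror resolver: fetch the channel's "new address" page
    # and pull the current domain out of it (placeholder URL and pattern).
    data = support.httptools.downloadpage('https://example-nuovo.link/').data
    return support.scrapertools.find_single_match(
        data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"')

# Channels with a resolver hand it to config.get_channel_url(); channels whose
# URL comes straight from channels.json now call it with no argument.
host = config.get_channel_url(findhost)
headers = [['Referer', host]]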
@@ -1,4 +1,4 @@
<addon id="plugin.video.kod" name="Kodi on Demand" provider-name="KOD Team" version="0.6">
<addon id="plugin.video.kod" name="Kodi on Demand" provider-name="KOD Team" version="0.7">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,9 +19,9 @@
<screenshot>resources/media/themes/ss/2.png</screenshot>
<screenshot>resources/media/themes/ss/3.png</screenshot>
</assets>
<news>-Nuova ricerca globale
-migliorie prestazionali in generale
-fix vari ai server</news>
<news>- nuovo metodo di override DNS
- aggiunta opzione nascondi server, se usi l'autoplay
- migliorie al codice e fix vari</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>

@@ -1,34 +1,31 @@
{
"altadefinizione01": "https://www.altadefinizione01.cc",
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione01.cx",
"altadefinizioneclick": "https://altadefinizione.cloud",
"animeforce": "https://ww1.animeforce.org",
"altadefinizione01": "https://www.altadefinizione01.tel",
"altadefinizione01_link": "https://altadefinizione01.date",
"animeforce": "https://ww1.animeforce.org",
"animeleggendari": "https://animepertutti.com",
"animespace": "http://www.animespace.tv",
"animespace": "https://animespace.tv",
"animestream": "https://www.animeworld.it",
"animesubita": "http://www.animesubita.org",
"animetubeita": "http://www.animetubeita.com",
"animeworld": "https://www1.animeworld.tv",
"casacinema": "https://www.casacinema.uno",
"casacinemainfo": "https://www.casacinema.info",
"casacinema": "https://www.casacinema.cloud",
"casacinemaInfo": "https://casacinema.space",
"cb01anime": "https://www.cineblog01.ink",
"cinemalibero": "https://www.cinemalibero.live",
"cinetecadibologna" : "http://cinestore.cinetecadibologna.it",
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.pink",
"dreamsub": "https://www.dreamsub.stream",
"fastsubita": "https://fastsubita.com",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmgratis": "https://www.filmaltadefinizione.org",
"filmigratis": "https://filmigratis.org",
"filmpertutti": "https://www.filmpertutti.gratis",
"filmpertutti": "https://www.filmpertutti.casa",
"filmsenzalimiticc": "https://www.filmsenzalimiti.monster",
"filmstreaming01": "https://filmstreaming01.com",
"guardarefilm": "https://www.guardarefilm.red",
"guardaserie_stream": "https://guardaserie.co",
"guardaserieclick": "https://www.guardaserie.media",
"ilgeniodellostreaming": "https://igds.red",
"italiafilm": "https://www.italia-film.video",
"italiaserie": "https://italiaserie.org",
"italiaserie": "https://italiaserie.org",
"mondoserietv": "https://mondoserietv.com",
"netfreex": "https://www.netfreex.online",
"piratestreaming": "https://www.piratestreaming.gratis",

@@ -48,7 +48,7 @@ from core import support
from platformcode import config

# in caso di necessità
#from core import scrapertoolsV2, httptools, servertools, tmdb
#from core import scrapertools, httptools, servertools, tmdb
from core.item import Item # per newest
#from lib import unshortenit

@@ -74,10 +74,9 @@ def findhost():
permUrl = httptools.downloadpage('INSERIRE-URL-QUI', follow_redirects=False).headers
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
# cancellare host non utilizzato
host = scrapertoolsV2.find_single_match(permUrl, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"')
host = scrapertools.find_single_match(permUrl, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"')
headers = [['Referer', host]]

findhost() # così le imposta una volta per tutte
# così le imposta una volta per tutte
### fine findhost

# server di esempio...
@@ -221,7 +220,7 @@ def select(item):
# pulizia di data, in caso commentare le prossime 2 righe
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
block = scrapertoolsV2.find_single_match(data, r'')
block = scrapertools.find_single_match(data, r'')
if re.findall('', data, re.IGNORECASE):
support.log('select = ### è una serie ###')
return episodios(Item(channel=item.channel,

@@ -13,27 +13,27 @@
Ulteriori info:

"""
from core import scrapertoolsV2, httptools, support
from core import scrapertools, httptools, support
from core.item import Item
from platformcode import config, logger

#impostati dinamicamente da findhost()
host = "https://www.altadefinizione01.cc"
headers = ""

def findhost():
pass
# global host, headers
# data = httptools.downloadpage('https://altadefinizione01-nuovo.link/').data
# host = scrapertoolsV2.find_single_match(data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"')
# headers = [['Referer', host]]
data = httptools.downloadpage('https://altadefinizione01-nuovo.link/').data
host = scrapertools.find_single_match(data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"')
return host

host = config.get_channel_url(findhost)
headers = [['Referer', host]]

list_servers = ['verystream','openload','rapidvideo','streamango']
list_quality = ['default']

@support.menu
def mainlist(item):
findhost()

film = [
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
('Ultimi Aggiornati-Aggiunti', ['','peliculas', 'update']),
@@ -48,7 +48,7 @@ def mainlist(item):
@support.scrape
def peliculas(item):
support.log('peliculas',item)
findhost()

## deflang = 'ITA'
action="findvideos"

@@ -106,7 +106,7 @@ def orderalf(item):

def search(item, text):
support.log(item, text)
findhost()

itemlist = []
text = text.replace(" ", "+")
@@ -123,7 +123,7 @@ def search(item, text):

def newest(categoria):
support.log(categoria)
findhost()

itemlist = []
item = Item()
try:

@@ -23,7 +23,7 @@ from platformcode import config, logger
__channel__ = "altadefinizione01_link"

# ======== def per utility INIZIO ============================
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]

list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']

@@ -21,9 +21,12 @@ from core import support
from core.item import Item
from platformcode import config

__channel__ = 'altadefinizioneclick'
def findhost():
data = support.httptools.downloadpage('https://altadefinizione-nuovo.link/').data
host = support.scrapertools.find_single_match(data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"')
return host

host = config.get_channel_url(__channel__)
host = config.get_channel_url(findhost)
headers = [['Referer', host]]
list_servers = ['verystream', 'rapidvideo', 'openload', 'streamango', 'vidoza',
'vidcloud', 'thevideo', 'okru', 'hdload', 'youtube']

@@ -6,8 +6,7 @@
from servers.decrypters import adfly
from core import support

__channel__ = "animeforce"
host = support.config.get_channel_url(__channel__)
host = support.config.get_channel_url()

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
@@ -82,8 +81,8 @@ def peliculas(item):
if item.args == 'newest':
url = support.match(item, '<a href="([^"]+)" title="[^"]+" target="[^"]+" class="btn', headers=headers)[0]
item.url = url[0] if url else ''
delete = support.scrapertoolsV2.find_single_match(item.fulltitle, r'( Episodi.*)')
episode = support.scrapertoolsV2.find_single_match(item.title, r'Episodi(?:o)? (?:\d+÷)?(\d+)')
delete = support.scrapertools.find_single_match(item.fulltitle, r'( Episodi.*)')
episode = support.scrapertools.find_single_match(item.title, r'Episodi(?:o)? (?:\d+÷)?(\d+)')
item.title = support.typo(episode + ' - ','bold') + item.title.replace(delete,'')
item.fulltitle = item.show = item.title.replace(delete,'')
item.episode = episode

@@ -6,8 +6,7 @@
from core import support
from lib.js2py.host import jsfunctions

__channel__ = "animeleggendari"
host = support.config.get_channel_url(__channel__)
host = support.config.get_channel_url()

headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]

@@ -15,8 +15,7 @@ from platformcode import logger, config
from specials import autoplay
from specials import renumbertools

__channel__ = "animespace"
host = config.get_channel_url(__channel__)
host = config.get_channel_url()

checklinks = config.get_setting('checklinks', 'animespace')
checklinks_number = config.get_setting('checklinks_number', 'animespace')

@@ -6,8 +6,7 @@

from core import support

__channel__ = "animesubita"
host = support.config.get_channel_url(__channel__)
host = support.config.get_channel_url()
headers = {'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}

list_servers = ['directo']

@@ -6,8 +6,7 @@ import re
import urllib
from core import support

__channel__ = "animetubeita"
host = support.config.get_channel_url(__channel__)
host = support.config.get_channel_url()

headers = {'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
@@ -119,8 +118,8 @@ def findvideos(item):
headers['Referer'] = item.url
headers['Cookie'] = cookies[:-1]

url = support.scrapertoolsV2.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
if not url: url = support.scrapertoolsV2.find_single_match(data, 'file: "([^"]+)"')
url = support.scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
if not url: url = support.scrapertools.find_single_match(data, 'file: "([^"]+)"')
if url:
url += '|' + urllib.urlencode(headers)
itemlist.append(

@@ -5,10 +5,11 @@

from core import support, jsontools

__channel__ = "animeworld"
host = support.config.get_channel_url(__channel__)
host = support.config.get_channel_url()
headers = [['Referer', host]]

__channel__ = 'animeworld'

list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
list_quality = ['default', '480p', '720p', '1080p']

@@ -146,9 +147,9 @@ def findvideos(item):
videoData = ''

for serverid in matches:
if not item.number: item.number = support.scrapertoolsV2.find_single_match(item.title,r'(\d+) -')
block = support.scrapertoolsV2.find_multiple_matches(data,'data-id="' + serverid + '">(.*?)<div class="server')
ID = support.scrapertoolsV2.find_single_match(str(block),r'<a data-id="([^"]+)" data-base="' + (item.number if item.number else '1') + '"')
if not item.number: item.number = support.scrapertools.find_single_match(item.title, r'(\d+) -')
block = support.scrapertools.find_multiple_matches(data, 'data-id="' + serverid + '">(.*?)<div class="server')
ID = support.scrapertools.find_single_match(str(block), r'<a data-id="([^"]+)" data-base="' + (item.number if item.number else '1') + '"')
support.log('ID= ',serverid)
if id:
if serverid == '26':

@@ -24,13 +24,12 @@ from core import support
from platformcode import config

# in caso di necessità
from core import scrapertoolsV2, httptools
from core import scrapertools, httptools
from core.item import Item

##### fine import
__channel__ = "casacinema"
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]

list_servers = ['verystream', 'openload', 'wstream', 'speedvideo']
@@ -125,7 +124,7 @@ def select(item):
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
if 'continua con il video' in data.lower():
## block = scrapertoolsV2.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<div style="margin-left: 0.5%; color: #FFF;">')
## block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<div style="margin-left: 0.5%; color: #FFF;">')
## if re.findall('rel="category tag">serie', data, re.IGNORECASE):
support.log('select = ### è un film ###')
return findvideos(Item(channel=item.channel,

@@ -19,21 +19,19 @@
"""

from core import support
from core import scrapertoolsV2, httptools
from core import scrapertools, httptools
from core.item import Item

host = "https://casacinema.stream"
headers = ""

def findhost():
pass
# global host, headers
# data = httptools.downloadpage('https://casacinema.nuovo.link').data
# host = scrapertoolsV2.find_single_match(data, r'<div class="elementor-widget-container"><div class="elementor-button-wrapper"> <a href="([^"]+)"')
# headers = [['Referer', host]]
# if host.endswith('/'):
# host = host[:-1]
# def findhost():
# data = httptools.downloadpage('https://casacinema.nuovo.link').data
# host = scrapertools.find_single_match(data, r'<div class="elementor-widget-container"><div class="elementor-button-wrapper"> <a href="([^"]+)"')
# if host.endswith('/'):
# host = host[:-1]
# return host

host = support.config.get_channel_url()
headers = [['Referer', host]]

list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
list_quality = ['default', 'HD', '3D', '4K', 'DVD', 'SD']
@@ -41,7 +39,7 @@ list_quality = ['default', 'HD', '3D', '4K', 'DVD', 'SD']
@support.menu
def mainlist(item):
support.log(item)
findhost()

film = ['',
('Al Cinema', ['/category/in-sala/', 'peliculas', '']),
('Novità', ['/category/nuove-uscite/', 'peliculas', '']),
@@ -69,7 +67,7 @@ def peliculas(item):
patronNext = '<a href="([^"]+)"\s+?><i class="glyphicon glyphicon-chevron-right"'

#support.regexDbg(item, patronBlock, headers)
#debug = True
# debug = True
return locals()

@@ -89,7 +87,7 @@ def genres(item):

def search(item, text):
support.log('search', item)
findhost()

itemlist = []
text = text.replace(' ', '+')
item.args = 'search'
@@ -105,7 +103,7 @@ def search(item, text):

def newest(categoria):
support.log('newest ->', categoria)
findhost()

itemlist = []
item = Item()

@@ -6,15 +6,11 @@

from core import support

__channel__ = "cb01anime"
host = support.config.get_channel_url(__channel__) + '/anime'
host = support.config.get_channel_url() + '/anime'

Blacklist = ['AVVISO IMPORTANTE – CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE','Lista Richieste Up & Re-Up']
list_servers = ['verystream', 'openload', 'streamango', 'thevideome']
list_quality = ['1080p', '720p', '480p', '360']

checklinks = support.config.get_setting('checklinks', __channel__)
checklinks_number = support.config.get_setting('checklinks_number', __channel__)
headers = [['Referer', host]]

@support.menu

@@ -5,28 +5,22 @@

import re

from core import scrapertoolsV2, httptools, servertools, tmdb, support
from core import scrapertools, httptools, servertools, tmdb, support
from core.item import Item
from lib import unshortenit
from platformcode import logger, config

#impostati dinamicamente da findhost()
host = "https://cb01.nl"
headers = ""

def findhost():
pass
# global host, headers
# permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
# if 'google' in permUrl['location']:
# if host[:4] != 'http':
# host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
# else:
# host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
# else:
# host = permUrl['location']
# headers = [['Referer', host]]
permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
if 'google' in permUrl['location']:
host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
else:
host = permUrl['location']
return host

host = config.get_channel_url(findhost)
headers = [['Referer', host]]

list_servers = ['verystream', 'openload', 'streamango', 'wstream']
list_quality = ['HD', 'SD', 'default']
@@ -37,7 +31,6 @@ checklinks_number = config.get_setting('checklinks_number', 'cineblog01')

@support.menu
def mainlist(item):
findhost()
film = [
('HD', ['', 'menu', 'Film HD Streaming']),
('Generi', ['', 'menu', 'Film per Genere']),
@@ -60,7 +53,6 @@ def mainlist(item):

@support.scrape
def menu(item):
findhost()
patronBlock = item.args + r'<span.*?><\/span>.*?<ul.*?>(?P<block>.*?)<\/ul>'
patronMenu = r'href="?(?P<url>[^">]+)"?>(?P<title>.*?)<\/a>'
action = 'peliculas'
@@ -70,7 +62,7 @@ def menu(item):

# @support.scrape
# def newest(categoria):
# findhost()
#
# # debug = True
# patron = r'<a href="?(?P<url>[^">]+)"?>(?P<title>[^<([]+)(?:\[(?P<lang>Sub-ITA|B/N|SUB-ITA)\])?\s*(?:\[(?P<quality>HD|SD|HD/3D)\])?\s*\((?P<year>[0-9]{4})\)<\/a>'

@@ -100,7 +92,7 @@ def menu(item):

def newest(categoria):
support.log(categoria)
findhost()

item = support.Item()
try:
if categoria == "series":
@@ -175,13 +167,13 @@ def episodios(item):

def findvideos(item):
findhost()

if item.contentType == "episode":
return findvid_serie(item)

def load_links(itemlist, re_txt, color, desc_txt, quality=""):
streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '')
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
support.log('STREAMING',streaming)
support.log('STREAMING=', streaming)
# patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
@@ -215,7 +207,7 @@ def findvideos(item):
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
QualityStr = ""
for match in matches:
QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]
QualityStr = scrapertools.decodeHtmlentities(match.group(1))[6:]

# Estrae i contenuti - Streaming
load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange", "Streaming", "SD")
@@ -315,12 +307,12 @@ def play(item):
data = httptools.downloadpage(item.url).data
if "window.location.href" in data:
try:
data = scrapertoolsV2.find_single_match(data, 'window.location.href = "([^"]+)";')
data = scrapertools.find_single_match(data, 'window.location.href = "([^"]+)";')
except IndexError:
data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
data, c = unshortenit.unwrap_30x_only(data)
else:
data = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
data = scrapertools.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')

logger.debug("##### play go.php data ##\n%s\n##" % data)
else:

@@ -5,15 +5,14 @@

import re

from core import httptools, support, scrapertoolsV2
from core import httptools, support, scrapertools
from core.item import Item
from platformcode import config

list_servers = ['akstream', 'wstream', 'backin', 'clipwatching', 'cloudvideo', 'verystream', 'onlystream', 'mixdrop']
list_quality = ['default']

__channel__ = "cinemalibero"
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]

@support.menu
@@ -90,7 +89,7 @@ def episodios(item):
patronBlock = r'<p><strong>(?:.+?[Ss]tagione\s)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:</span|</p)'
item.contentType = 'tvshow'
def itemHook(item):
if not scrapertoolsV2.find_single_match(item.title, r'(\d+x\d+)'):
if not scrapertools.find_single_match(item.title, r'(\d+x\d+)'):
item.title = re.sub(r'(\d+) -', '1x\\1', item.title)
return item

@@ -149,7 +148,7 @@ def check(item):
support.log()
data = httptools.downloadpage(item.url, headers=headers).data
if data:
blockAnime = scrapertoolsV2.find_single_match(data, r'<div id="container" class="container">(.+?<div style="margin-left)')
blockAnime = scrapertools.find_single_match(data, r'<div id="container" class="container">(.+?<div style="margin-left)')

if blockAnime and ('episodio' in blockAnime.lower() or 'saga' in blockAnime.lower()):
item.contentType = 'tvshow'
@@ -157,7 +156,7 @@ def check(item):
item.data = blockAnime
return episodios(item)

elif scrapertoolsV2.find_single_match(blockAnime,r'\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+'):
elif scrapertools.find_single_match(blockAnime, r'\d+(?:×|×)?\d+\-\d+|\d+(?:×|×)\d+'):
item.contentType = 'tvshow'
item.data = data
return episodios(item)

@@ -1,36 +1,12 @@
{
"id": "cinetecadibologna",
"name": "Cinetecadibologna",
"name": "Cineteca di Bologna",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
"banner": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
"thumbnail": "cinetecadibologna.png",
"banner": "cinetecadibologna.png",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
"not_active":["include_in_newest"],
"settings": []
}

@@ -12,145 +12,78 @@ from core import httptools, scrapertools
from core.item import Item
from platformcode import logger, config

host = "http://cinestore.cinetecadibologna.it"
from core import support

host = support.config.get_channel_url()

headers = [['Referer', host]]

@support.menu
def mainlist(item):
logger.info("kod.cinetecadibologna mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Elenco Film - Cineteca di Bologna[/COLOR]",
action="peliculas",
url="%s/video/alfabetico_completo" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
Item(channel=item.channel,
title="[COLOR azure]Epoche - Cineteca di Bologna[/COLOR]",
action="epoche",
url="%s/video/epoche" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
Item(channel=item.channel,
title="[COLOR azure]Percorsi Tematici - Cineteca di Bologna[/COLOR]",
action="percorsi",
url="%s/video/percorsi" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif")]

return itemlist
film = ['/video/alfabetico_completo',
('Anni',['/video/epoche', 'menu']),
('Registi',['/video/registi', 'menu']),
('Attori',['/video/attori', 'menu']),
('Percorsi Tematici',['/video/percorsi','menu'])]
return locals()

@support.scrape
def menu(item):
action = 'peliculas'
if 'epoche' in item.url:
patronMenu =r'<li>\s*<a href="(?P<url>[^"]+)">(?P<title>[^>]+)<'
elif 'percorsi' in item.url:
patron = r'<div class="cover_percorso">\s*<a href="(?P<url>[^"]+)">\s*<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>(?P<title>.*?)<'
else:
patron = r'<h2>\s*<a href="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)<'
patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc'
return locals()

def search(item, text):
support.log(text)
item.args = 'noorder'
item.url = host + '/ricerca/type_ALL/ricerca_' + text
item.contentType = 'movie'
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []

@support.scrape
def peliculas(item):
logger.info("kod.cinetecadibologna peliculas")
itemlist = []
if 'alfabetico' in item.url:
patron = r'<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?:\[)?(?P<title>[^\]<]+)(?:\]|<)'
else:
if 'type_ALL' in item.url: patronBlock = r'Video:(?P<block>.*?)(?:<div class=""|<!--)'
elif not 'NomePersona' in item.url: patronBlock = r'<h3>Film</h3>(?P<block>.*?)<div class="list_wrapper'
patron = r'<a href="(?P<url>[^"]+)"\s*class="[^"]+"\s*title="(?:\[)?(?P<title>[^\]"]+)(?:\])?"\s*rel="(?P<thumb>[^"]+)"'
patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc'
return locals()

# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data

# Estrae i contenuti
patron = '<img src="([^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="([^"]+)"[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)

for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = host + scrapedthumbnail
scrapedurl = host + scrapedurl
if not "/video/" in scrapedurl:
continue
## html = scrapertools.cache_page(scrapedurl)
html = httptools.downloadpage(scrapedurl, headers=headers).data
start = html.find("Sinossi:")
end = html.find('<div class="sx_col">', start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
folder=True))

# Paginazione
patronvideos = '<div class="footerList clearfix">\s*<div class="sx">\s*[^>]+>[^g]+gina[^>]+>\s*[^>]+>\s*<div class="dx">\s*<a href="(.*?)">pagina suc'
matches = re.compile(patronvideos, re.DOTALL).findall(data)

if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url= scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))

return itemlist

def epoche(item):
logger.info("kod.cinetecadibologna categorias")
itemlist = []

data = httptools.downloadpage(item.url, headers=headers).data

# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, '<h1 class="pagetitle">Epoche</h1>(.*?)</ul>')

# The categories are the options for the combo
patron = '<a href="([^"]+)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)

for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedplot = ""
if scrapedtitle.startswith(("'")):
scrapedtitle = scrapedtitle.replace("'", "Anni '")
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail="http://www.cinetecadibologna.it/pics/cinema-ritrovato-alcinema.png",
plot=scrapedplot))

return itemlist

def percorsi(item):
logger.info("kod.cinetecadibologna categorias")
itemlist = []

data = httptools.downloadpage(item.url, headers=headers).data

patron = '<div class="cover_percorso">\s*<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>\s*[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedplot = ""
scrapedthumbnail = host + scrapedthumbnail
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot))

return itemlist

def findvideos(item):
logger.info("kod.cinetecadibologna findvideos")
support.log()
itemlist = []

data = httptools.downloadpage(item.url, headers=headers).data
matches = support.match(item, 'filename: "(.*?)"')[0]

patron = 'filename: "(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)

for video in matches:
video = host + video
for url in matches:
itemlist.append(
Item(
channel=item.channel,
action="play",
title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
url=video,
title='Diretto',
server='directo',
url=host + url,
folder=False))

return itemlist
return support.server(item, itemlist=itemlist)

|
||||
|
||||
@@ -10,8 +10,7 @@ from core import httptools, scrapertools, servertools, support
from core.item import Item
from platformcode import logger, config

__channel__ = "documentaristreamingda"
host = config.get_channel_url(__channel__)
host = config.get_channel_url()

list_servers = ['']
list_quality = ['']
|
||||
@@ -47,12 +47,11 @@ import re

from core import support
from platformcode import config
from core import scrapertoolsV2, httptools, servertools, tmdb
from core import scrapertools, httptools, servertools, tmdb
from core.item import Item

##### fine import
__channel__ = "dreamsub"
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]

# server di esempio...
@@ -229,8 +228,8 @@ def findvideos(item):
data = re.sub(r'>\s\s*<', '><', data)
patronBlock = r'LINK STREAMING(?P<block>.*?)LINK DOWNLOAD'
patron = r'href="(.+?)"'
block = scrapertoolsV2.find_single_match(data, patronBlock)
urls = scrapertoolsV2.find_multiple_matches(block, patron)
block = scrapertools.find_single_match(data, patronBlock)
urls = scrapertools.find_multiple_matches(block, patron)
#support.regexDbg(item, patron, headers, data=data)

for url in urls:
@@ -242,7 +241,7 @@ def findvideos(item):
lang = 'ITA'

if 'keepem.online' in data:
urls = scrapertoolsV2.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"')
urls = scrapertools.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"')
for url in urls:
url = httptools.downloadpage(url).url
itemlist += servertools.find_video_items(data=url)
@@ -255,14 +254,14 @@ def findvideos(item):

data = httptools.downloadpage(data).data
support.log("LINK-DATA2 :", data)
video_urls = scrapertoolsV2.find_single_match(data, r'<meta name="description" content="([^"]+)"')
video_urls = scrapertools.find_single_match(data, r'<meta name="description" content="([^"]+)"')

else:

data = httptools.downloadpage(url).data
#host_video = scrapertoolsV2.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
host_video = scrapertoolsV2.find_single_match(data, r'(?:let|var) thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
link = scrapertoolsV2.find_single_match(data, r'<video src="([^"]+)"')
#host_video = scrapertools.find_single_match(data, r'var thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
host_video = scrapertools.find_single_match(data, r'(?:let|var) thisPageUrl = "(http[s]\:\/\/[^\/]+).+?"')
link = scrapertools.find_single_match(data, r'<video src="([^"]+)"')
video_urls = host_video+link

title_show = support.typo(titles,'_ bold') + support.typo(lang,'_ [] color kod')
|
||||
|
||||
@@ -12,20 +12,16 @@
- serie, anime
"""
import re
from core import scrapertoolsV2, httptools, support
from core import scrapertools, httptools, support
from core.item import Item
from platformcode import config

#impostati dinamicamente da findhost()
host = "https://eurostreaming.pink"
headers = ""

def findhost():
pass
# global host, headers
# permUrl = httptools.downloadpage('https://eurostreaming.link/', follow_redirects=False).headers
# host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
# headers = [['Referer', host]]
permUrl = httptools.downloadpage('https://eurostreaming.link/', follow_redirects=False).headers
host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
return host

host = support.config.get_channel_url(findhost)
headers = [['Referer', host]]

@@ -35,7 +31,7 @@ list_quality = ['default']
@support.menu
def mainlist(item):
support.log()
findhost()

tvshow = [''
]
@@ -98,13 +94,13 @@ def pagina(url):
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
#support.log("DATA ----###----> ", data)
if 'clicca qui per aprire' in data.lower():
url = scrapertoolsV2.find_single_match(data, '"go_to":"([^"]+)"')
url = scrapertools.find_single_match(data, '"go_to":"([^"]+)"')
url = url.replace("\\","")
# Carica la pagina
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')

elif 'clicca qui</span>' in data.lower():
url = scrapertoolsV2.find_single_match(data, '<h2 style="text-align: center;"><a href="([^"]+)">')
url = scrapertools.find_single_match(data, '<h2 style="text-align: center;"><a href="([^"]+)">')
# Carica la pagina
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')

@@ -113,7 +109,7 @@ def pagina(url):
# =========== def ricerca =============
def search(item, texto):
support.log()
findhost()

item.url = "%s/?s=%s" % (host, texto)
item.contentType = 'tvshow'

@@ -131,7 +127,7 @@ def search(item, texto):

def newest(categoria):
support.log()
findhost()

itemlist = []
item = Item()
item.contentType = 'tvshow'
|
||||
|
||||
@@ -16,13 +16,12 @@
- SOLO SUB-ITA

"""
from core import support, httptools, scrapertoolsV2
from core import support, httptools, scrapertools
from core.item import Item
from core.support import log
from platformcode import config

__channel__ = 'fastsubita'
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]
list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
list_quality = ['default']
@@ -159,7 +158,7 @@ def findvideos(item):

data = httptools.downloadpage(item.url).data
patron = r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"'
series = scrapertoolsV2.find_single_match(data, patron)
series = scrapertools.find_single_match(data, patron)
titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
goseries = support.typo("Vai alla Serie:", ' bold color kod')
itemlist.append(
|
||||
|
||||
@@ -17,8 +17,7 @@ from core import servertools, httptools, support
from core.item import Item
from platformcode import config

__channel__ = 'filmigratis'
host = config.get_channel_url(__channel__)
host = config.get_channel_url()

list_servers = ['verystream', 'openload', 'streamango', 'vidoza', 'okru']
list_quality = ['1080p', '720p', '480p', '360']
|
||||
|
||||
@@ -17,13 +17,12 @@
"""
import re

from core import scrapertoolsV2, httptools, support
from core import scrapertools, httptools, support
from core.item import Item
from platformcode import config

__channel__ = 'filmpertutti'
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]
list_servers = ['speedvideo', 'verystream', 'openload', 'streamango', 'wstream', 'akvideo']
list_quality = ['HD', 'SD']
@@ -114,7 +113,7 @@ def select(item):
support.log()

data = httptools.downloadpage(item.url, headers=headers).data
patronBlock = scrapertoolsV2.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">')
patronBlock = scrapertools.find_single_match(data, r'class="taxonomy category" ><span property="name">(.*?)</span></a><meta property="position" content="2">')
if patronBlock.lower() != 'film':
support.log('select = ### è una serie ###')
item.contentType='tvshow'
@@ -170,6 +169,10 @@ def newest(categoria):

def findvideos(item):
if item.contentType == 'movie':
return support.server(item)
data = httptools.downloadpage(item.url).data
toUnshorten = scrapertools.find_multiple_matches(data, 'https?://buckler.link/[a-zA-Z0-9]+')
for link in toUnshorten:
data += '\n' + httptools.downloadpage(link, follow_redirects=False).headers["Location"]
return support.server(item, data=data)
else:
return support.server(item, item.url)
|
||||
|
||||
@@ -14,8 +14,7 @@ from platformcode import logger
from specials import autoplay

# Necessario per Autoplay
__channel__ = 'filmsenzalimiticc'
host = config.get_channel_url(__channel__)
host = config.get_channel_url()

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
|
||||
|
||||
@@ -19,8 +19,7 @@ from core.item import Item
from platformcode import config
from core.support import log

__channel__ = 'guardaserieclick'
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = [['Referer', host]]

list_servers = ['speedvideo', 'openload']
|
||||
|
||||
@@ -1,37 +0,0 @@
{
"id": "hdblog",
"name": "Hdblog",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://css.hd-cdn.it/new_files/templates/theme_darklight/img/logos_wt/logohdhardware.png",
"banner": "http://css.hd-cdn.it/new_files/templates/theme_darklight/img/logos_wt/logohdhardware.png",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}
|
||||
|
||||
@@ -1,94 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale hdblog
# ------------------------------------------------------------
import re
import urlparse

from core import httptools, scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config

host = "https://www.hdblog.it"

def mainlist(item):
logger.info("kod.hdblog mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Video recensioni tecnologiche[/COLOR]",
action="peliculas",
url=host + "/video/",
thumbnail="http://www.crat-arct.org/uploads/images/tic%201.jpg"),
Item(channel=item.channel,
title="[COLOR azure]Categorie[/COLOR]",
action="categorias",
url=host + "/video/",
thumbnail="http://www.crat-arct.org/uploads/images/tic%201.jpg")]

return itemlist

def categorias(item):
logger.info("kod.hdblog categorias")
itemlist = []

data = httptools.downloadpage(item.url).data
logger.info(data)

# Narrow search by selecting only the combo
start = data.find('<section class="left_toolbar" style="float: left;width: 125px;margin-right: 18px;">')
end = data.find('</section>', start)
bloque = data[start:end]

# The categories are the options for the combo
patron = '<a href="([^"]+)"[^>]+><span>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)

for scrapedurl, scrapedtitle in matches:
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl + "video/",
thumbnail=scrapedthumbnail,
plot=scrapedplot))

return itemlist

def peliculas(item):
logger.info("kod.hdblog peliculas")
itemlist = []

# Carica la pagina
data = httptools.downloadpage(item.url).data

# Estrae i contenuti
patron = '<a class="thumb_new_image" href="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>\s*</a>\s*[^>]+>\s*(.*?)\s*<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)

for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedplot = ""
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
folder=True))

# Paginazione
patronvideos = '<span class="attiva">[^>]+>[^=]+="next" href="(.*?)" class="inattiva">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)

if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=scrapedurl,
folder=True))

return itemlist
|
||||
@@ -33,13 +33,12 @@

import re

from core import scrapertoolsV2, httptools, support
from core import scrapertools, httptools, support
from core.support import log
from core.item import Item
from platformcode import config

__channel__ = 'ilgeniodellostreaming'
host = config.get_channel_url(__channel__)
host = config.get_channel_url()

list_servers = ['verystream', 'openload', 'streamango']
list_quality = ['default']
@@ -217,7 +216,7 @@ def findvideos(item):
matches, data = support.match(item, '<iframe class="metaframe rptss" src="([^"]+)"[^>]+>',headers=headers)
for url in matches:
html = httptools.downloadpage(url, headers=headers).data
data += str(scrapertoolsV2.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">'))
data += str(scrapertools.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">'))

itemlist = support.server(item, data)

@@ -225,7 +224,7 @@ def findvideos(item):

data = httptools.downloadpage(item.url).data
patron = r'<div class="item"><a href="'+host+'/serietv/([^"\/]+)\/"><i class="icon-bars">'
series = scrapertoolsV2.find_single_match(data, patron)
series = scrapertools.find_single_match(data, patron)
titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
goseries = support.typo("Vai alla Serie:", ' bold')
itemlist.append(
|
||||
|
||||
@@ -1,37 +0,0 @@
{
"id": "ilgiramondo",
"name": "IlGiramondo",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://www.ilgiramondo.net/wp-content/uploads/2013/05/logo-fixed.jpg",
"banner": "http://www.ilgiramondo.net/wp-content/uploads/2013/05/logo-fixed.jpg",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}
|
||||
|
||||
@@ -1,67 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale ilgiramondo
# ------------------------------------------------------------
import re
import urlparse

from core import httptools, scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config

host = "http://www.ilgiramondo.net"

def mainlist(item):
logger.info("kod.ilgiramondo mainlist")
itemlist = [Item(channel=item.channel, title="[COLOR azure]Video di Viaggi[/COLOR]", action="peliculas",
url=host + "/video-vacanze-viaggi/",
thumbnail="http://hotelsjaisalmer.com/wp-content/uploads/2016/10/Travel1.jpg")]

return itemlist

def peliculas(item):
logger.info("kod.ilgiramondo peliculas")
itemlist = []

# Carica la pagina
data = httptools.downloadpage(item.url).data

# Estrae i contenuti
patron = '<article id=[^>]+><div class="space">\s*<a href="([^"]+)"><img[^s]+src="(.*?)"[^>]+><\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)

for scrapedurl, scrapedthumbnail in matches:
html = httptools.downloadpage(scrapedurl).data
start = html.find("</script></div>")
end = html.find("</p>", start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
html = httptools.downloadpage(scrapedurl).data
start = html.find("<title>")
end = html.find("</title>", start)
scrapedtitle = html[start:end]
scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle)
scrapedtitle = scrapedtitle.replace(" | Video Di Viaggi E Vacanze", "")
# scrapedplot = ""
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
folder=True))

# Paginazione
patronvideos = '<a class="next page-numbers" href="(.*?)">Successivo'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)

if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=scrapedurl,
folder=True))

return itemlist
|
||||
@@ -1,37 +0,0 @@
{
"id": "istitutoluce",
"name": "Istituto Luce",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png",
"banner": "http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}
|
||||
|
||||
@@ -1,288 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale istitutoluce
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "https://patrimonio.archivioluce.com"
|
||||
host2 = "https://www.archivioluce.com"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
PERPAGE = 7
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("kod.istitutoluce mainlist")
|
||||
itemlist = [
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title="[COLOR azure]Archivio - Tutti i Filmati[/COLOR]",
|
||||
action="peliculas",
|
||||
url="%s/luce-web/search/result.html?query=&perPage=7" % host,
|
||||
thumbnail="http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png"
|
||||
),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title="[COLOR azure]Categorie Tematiche[/COLOR]",
|
||||
action="categorie",
|
||||
url="%s/navigazione-tematica/" % host2,
|
||||
thumbnail="http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png"
|
||||
),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
action="search",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
|
||||
)
|
||||
]
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorie(item):
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
bloque = scrapertools.find_single_match(data, '<section class="container directory">(.*?)<footer class="main">')
|
||||
patron = '<a class="label label-white" href="(.*?)">\s*(.*?)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = scrapedtitle.title()
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="cat_results",
|
||||
fulltitle=scrapedtitle,
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
viewmode="movie_with_plot",
|
||||
Folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def cat_results(item):
|
||||
logger.info("kod.istitutoluce cat_results")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<a href="([^"]+)" class="thumbnail">\s*<h1>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl in matches:
|
||||
scrapedtitle = scrapedurl
|
||||
scrapedtitle = scrapedtitle.rsplit('/', 1)[-1].rsplit(".", 1)[0].replace("-", " ").title()
|
||||
scrapedurl = host + scrapedurl
|
||||
scrapedplot = ""
|
||||
# html = scrapertools.cache_page(scrapedurl)
|
||||
# start = html.find('<p class="abstract">')
|
||||
# end = html.find('</p>', start)
|
||||
# scrapedplot = html[start:end]
|
||||
# scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
|
||||
# scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
scrapedthumbnail = ""
|
||||
# cache = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
# patron = 'image: "(.*?)"'
|
||||
# matches = re.compile(patron, re.DOTALL).findall(cache)
|
||||
# for scrapedthumbnail in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
# Paginazione
|
||||
patron = r'</span></td>\s*<td>\s*<a href="([^"]+)" class="btn-pag-luce">'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
|
||||
if next_page:
|
||||
scrapedurl = urlparse.urljoin(item.url, next_page)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="cat_results",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info("kod.istitutoluce peliculas")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<a href="([^"]+)" class="thumbnail">\s*<h1>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl in matches:
|
||||
scrapedtitle = scrapedurl
|
||||
scrapedtitle = scrapedtitle.rsplit('/', 1)[-1].rsplit(".", 1)[0].replace("-", " ").title()
|
||||
scrapedurl = host + scrapedurl
|
||||
|
||||
## html = scrapertools.cache_page(scrapedurl)
|
||||
html = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
start = html.find('<p class="abstract">')
|
||||
end = html.find('</p>', start)
|
||||
scrapedplot = html[start:end]
|
||||
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
|
||||
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
|
||||
html = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
patron = 'image: "(.*?)"'
|
||||
scrapedthumbnail = scrapertools.find_single_match(html, patron)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
# Paginazione
|
||||
patron = r'</span></td>\s*<td>\s*<a href="([^"]+)" class="btn-pag-luce">'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
|
||||
if next_page:
|
||||
scrapedurl = urlparse.urljoin(item.url, next_page)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas_src(item):
|
||||
logger.info("kod.istitutoluce peliculas")
|
||||
itemlist = []
|
||||
|
||||
p = 1
|
||||
if '{}' in item.url:
|
||||
item.url, p = item.url.split('{}')
|
||||
p = int(p)
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<a href="([^"]+)" class="thumbnail">\s*<h1>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for i, scrapedurl in enumerate(matches):
|
||||
if (p - 1) * PERPAGE > i: continue
|
||||
if i >= p * PERPAGE: break
|
||||
scrapedtitle = scrapedurl
|
||||
scrapedtitle = scrapedtitle.rsplit('/', 1)[-1].rsplit(".", 1)[0].replace("-", " ").title()
|
||||
scrapedurl = urlparse.urljoin(host, scrapedurl)
|
||||
|
||||
html = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
start = html.find('<p class="abstract">')
|
||||
end = html.find('</p>', start)
|
||||
scrapedplot = html[start:end]
|
||||
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
|
||||
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
|
||||
html = httptools.downloadpage(scrapedurl, headers=headers).data
|
||||
patron = 'image: "(.*?)"'
|
||||
scrapedthumbnail = scrapertools.find_single_match(html, patron)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
# Paginazione
|
||||
if len(matches) >= p * PERPAGE:
|
||||
scrapedurl = item.url + '{}' + str(p + 1)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action="peliculas_src",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
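The '{}' marker that peliculas_src() appends to item.url is how the page number travels between invocations: the part before the marker is the real URL, the part after is the page to show, and only PERPAGE results of the full match list are emitted. A standalone sketch of that scheme, assuming PERPAGE = 7 as above:

# Standalone sketch of the "{}"-based client-side paging used above (illustrative only).
PERPAGE = 7

def split_page_marker(url):
    # "https://host/search{}3" -> ("https://host/search", 3); no marker -> page 1
    if '{}' in url:
        base, page = url.split('{}')
        return base, int(page)
    return url, 1

def slice_for_page(matches, page):
    # Keep only the PERPAGE entries belonging to the requested page.
    start = (page - 1) * PERPAGE
    return matches[start:start + PERPAGE]

def next_page_url(url, matches, page):
    # Append the marker only when more results remain, mirroring the check above.
    base = url.split('{}')[0]
    if len(matches) >= page * PERPAGE:
        return base + '{}' + str(page + 1)
    return None

# Example: slice_for_page(list(range(20)), 2) -> [7, 8, 9, 10, 11, 12, 13]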
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[istitutoluce.py] search")
|
||||
|
||||
item.url = host + '/luce-web/search/result.html?archiveType_string="xDamsCineLuce"&archiveName_string="luceFondoCinegiornali"&archiveName_string="luceFondoDocumentari"&archiveName_string="luceFondoRepertori"&titoloADV=&descrizioneADV="' + texto + '"'
|
||||
|
||||
try:
|
||||
return peliculas_src(item)
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("kod.istitutoluce findvideos")
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.title + videoitem.title
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.show = item.show
|
||||
videoitem.plot = item.plot
|
||||
videoitem.channel = item.channel
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = 'file: "rtsp:([^"]+)"\s*}'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for video in matches:
|
||||
video = "rtsp:" + video
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="play",
|
||||
title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
|
||||
url=video,
|
||||
folder=False))
|
||||
|
||||
return itemlist
|
||||
@@ -1,44 +0,0 @@
|
||||
{
|
||||
"id": "italiafilm",
|
||||
"name": "Italia-Film.co",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiafilm.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiafilm.png",
|
||||
"categories": ["movie", "tvshow"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,497 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale per italiafilm
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
__channel__ = 'italiafilm'
|
||||
host = config.get_channel_url(__channel__)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("[italiafilm.py] mainlist")
|
||||
itemlist = [Item(channel=item.channel,
|
||||
title="[COLOR azure]Film - Novita'[/COLOR]",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/novita-streaming-1/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Film HD[/COLOR]",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/category/film-hd/" % host,
|
||||
thumbnail="http://i.imgur.com/3ED6lOP.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Categorie[/COLOR]",
|
||||
action="categorias",
|
||||
extra="movie",
|
||||
url="%s/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
action="search",
|
||||
extra="movie",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Serie TV[/COLOR]",
|
||||
action="peliculas_tv",
|
||||
extra="tvshow",
|
||||
url="%s/category/serie-tv/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Ultime serie TV[/COLOR]",
|
||||
action="pel_tv",
|
||||
extra="tvshow",
|
||||
url="%s/ultimi-telefilm-streaming/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Ultimi Episodi[/COLOR]",
|
||||
action="latestep",
|
||||
extra="tvshow",
|
||||
url="%s/ultime-serie-tv-streaming/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
|
||||
action="search",
|
||||
extra="tvshow",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "film":
|
||||
item.url = "%s/novita-streaming-1/" % host
|
||||
item.action = "peliculas"
|
||||
item.extra = "movie"
|
||||
itemlist = peliculas(item)
|
||||
|
||||
if itemlist[-1].action == "peliculas":
|
||||
itemlist.pop()
|
||||
elif categoria == "series":
|
||||
item.url = "%s/ultime-serie-tv-streaming/" % host
|
||||
item.action = "latestep"
|
||||
itemlist = latestep(item)
|
||||
|
||||
if itemlist[-1].action == "series":
|
||||
itemlist.pop()
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info("[italiafilm.py] categorias")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
data = scrapertools.find_single_match(data, '<a href=".">Categorie</a>(.*?)</div>')
|
||||
|
||||
patron = '<li[^>]+><a href="([^"]+)">Film([^<]+)</a></li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
for url, title in matches:
|
||||
scrapedtitle = title
|
||||
scrapedurl = urlparse.urljoin(item.url, url)
|
||||
|
||||
if scrapedtitle.startswith((" Porno")):
|
||||
continue
|
||||
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='peliculas',
|
||||
extra=item.extra,
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[italiafilm.py] search " + texto)
|
||||
item.url = host + "/?s=" + texto
|
||||
|
||||
try:
|
||||
if item.extra == "movie":
|
||||
return peliculas(item)
|
||||
if item.extra == "tvshow":
|
||||
return peliculas_tv(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def latestep(item):
|
||||
logger.info("[italiafilm.py] latestep")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
blocco = scrapertools.find_single_match(data, r'<li class="section_date">(.*?)<li class="section_date">')
|
||||
patron = r'<li class="[^"]+">\s*[^>]+>([^<|^(]+)[^>]+>\s*<a href="([^"]+)"'
|
||||
patron += r'[^>]+>[^>]+>[^>]+>(?:[^>]+>[^>]+>|)([^<]+)(?:[^>]+>[^>]+>|)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(blocco)
|
||||
|
||||
for scrapedtitle, scrapedurl, scrapedepisode in matches:
|
||||
scrapedepisode = scrapertools.decodeHtmlentities(scrapedepisode)
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
|
||||
completetitle = "%s - %s" % (scrapedtitle, scrapedepisode)
|
||||
|
||||
unsupportedeps = re.compile(r'\d+\-\d+', re.DOTALL).findall(scrapedepisode)
|
||||
if len(unsupportedeps) > 0:
|
||||
continue
|
||||
|
||||
if 'completa' in scrapedtitle.lower():
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodios",
|
||||
title=completetitle,
|
||||
contentSerieName=completetitle,
|
||||
fulltitle=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
folder=True))
|
||||
else:
|
||||
if 'episodio' not in scrapedepisode:
|
||||
replace = re.compile(r'(\d+)x(\d+)')
|
||||
ep_pattern = r'%s(.*?(?:<br\s*/>|</p>))' % replace.sub(r'\g<1>×\g<2>', scrapedepisode)
|
||||
else:
|
||||
ep_pattern = r'%s(.*?(?:<br\s*/>|</p>))' % scrapedepisode
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos_single_ep",
|
||||
title=completetitle,
|
||||
contentSerieName=completetitle,
|
||||
fulltitle=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
extra=ep_pattern,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info("[italiafilm.py] peliculas")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = '<article(.*?)</article>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for match in matches:
|
||||
title = scrapertools.find_single_match(match, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
|
||||
title = title.replace("Streaming", "")
|
||||
title = scrapertools.decodeHtmlentities(title).strip()
|
||||
url = scrapertools.find_single_match(match, '<h3[^<]+<a href="([^"]+)"')
|
||||
if 'film-porno' in url: continue
|
||||
plot = ""
|
||||
thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action='findvideos',
|
||||
contentType="movie",
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
viewmode="movie_with_plot",
|
||||
folder=True))
|
||||
|
||||
# Pagina successiva
|
||||
try:
|
||||
pagina_siguiente = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
extra=item.extra,
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=pagina_siguiente,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
except:
|
||||
pass
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvid(item):
|
||||
logger.info("kod.italiafilm findvid")
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti redirect
|
||||
urls = scrapertools.find_multiple_matches(data, '<a href="([^"]+)" target="_blank" rel="noopener">') #
|
||||
for url in urls: # Fix
|
||||
page = httptools.downloadpage(url, headers=headers).data #
|
||||
data += '\n' + scrapertools.find_single_match(page,'<meta name="og:url" content="([^=]+)">') #
|
||||
|
||||
|
||||
for videoitem in servertools.find_video_items(data=data):
|
||||
videoitem.title = item.title + videoitem.title
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.show = item.show
|
||||
videoitem.plot = item.plot
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
itemlist.append(videoitem)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas_tv(item):
|
||||
logger.info("[italiafilm.py] peliculas")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = '<article(.*?)</article>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for match in matches:
|
||||
title = scrapertools.find_single_match(match, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
|
||||
title = title.replace("Streaming", "")
|
||||
title = scrapertools.decodeHtmlentities(title).strip()
|
||||
show_title = re.sub('\(.*?\)', '', title.replace('Serie TV', ''))
|
||||
url = scrapertools.find_single_match(match, '<h3[^<]+<a href="([^"]+)"')
|
||||
plot = ""
|
||||
thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action='episodios',
|
||||
fulltitle=title,
|
||||
show=show_title,
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
viewmode="movie_with_plot",
|
||||
folder=True))
|
||||
|
||||
# Successivo
|
||||
try:
|
||||
pagina_siguiente = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas_tv",
|
||||
extra=item.extra,
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=pagina_siguiente,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
except:
|
||||
pass
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
|
||||
def pel_tv(item):
|
||||
logger.info("[italiafilm.py] peliculas")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = '<span class="tvseries_name">(.*?)</span>\s*<a href="([^"]+)"[^>]+><i class="icon-link"></i>(.*?)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scraptitle1, scrapedurl, scraptitle2 in matches:
|
||||
title = scraptitle1 + scraptitle2
|
||||
plot = ""
|
||||
thumbnail = ""
|
||||
url = scrapedurl
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action='episodios',
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
url=url,
|
||||
thumbnail=thumbnail,
|
||||
plot=plot,
|
||||
viewmode="movie_with_plot",
|
||||
folder=True))
|
||||
|
||||
# Siguiente
|
||||
try:
|
||||
pagina_siguiente = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="pel_tv",
|
||||
extra=item.extra,
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=pagina_siguiente,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
except:
|
||||
pass
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
|
||||
def load_episodios(html, item, itemlist, lang_title):
|
||||
for data in scrapertools.decodeHtmlentities(html).splitlines():
|
||||
# Estrae i contenuti
|
||||
end = data.find('<a ')
|
||||
if end > 0:
|
||||
scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
|
||||
else:
|
||||
scrapedtitle = ''
|
||||
if scrapedtitle == '':
|
||||
patron = '<a.*?href="[^"]+".*?>([^<]+)</a>'
|
||||
scrapedtitle = scrapertools.find_single_match(data, patron).strip()
|
||||
title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+')
|
||||
if title == '':
|
||||
title = scrapedtitle
|
||||
if title != '':
|
||||
title = re.sub(r"(\d+)[^\d]+(\d+)", r"\1x\2", title)
|
||||
title += " (" + lang_title + ")"
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
title=title,
|
||||
url=data,
|
||||
thumbnail=item.thumbnail,
|
||||
extra=item.extra,
|
||||
fulltitle=title + ' - ' + item.show,
|
||||
show=item.show))
|
||||
|
||||
logger.info("[italiafilm.py] episodios")
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data.replace('<br>','\n') # fix
|
||||
|
||||
start = data.find('id="pd_rating_holder')
|
||||
end = data.find('id="linkcorrotto-show"', start)
|
||||
|
||||
data = data[start:end]
|
||||
|
||||
lang_titles = []
|
||||
starts = []
|
||||
patron = r"STAGION[I|E](.*?ITA)?"
|
||||
matches = re.compile(patron, re.IGNORECASE).finditer(data)
|
||||
for match in matches:
|
||||
season_title = match.group()
|
||||
# if season_title != '':
|
||||
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
|
||||
starts.append(match.end())
|
||||
|
||||
i = 1
|
||||
len_lang_titles = len(lang_titles)
|
||||
|
||||
while i <= len_lang_titles:
|
||||
inizio = starts[i - 1]
|
||||
fine = starts[i] if i < len_lang_titles else -1
|
||||
|
||||
html = data[inizio:fine]
|
||||
lang_title = lang_titles[i - 1]
|
||||
|
||||
load_episodios(html, item, itemlist, lang_title)
|
||||
|
||||
i += 1
|
||||
|
||||
if len(itemlist) == 0:
|
||||
load_episodios(data, item, itemlist, 'ITA')
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist
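The episode labels handled by load_episodios() arrive in mixed forms ('2×07', '1 - 05', '3x12'); the find_single_match/re.sub pair above reduces them to the NxM form used in the titles. A short standalone check of that normalization (same patterns as above, wrapped in plain re calls for the example):

# -*- coding: utf-8 -*-
# Quick standalone check of the episode-number normalization used in load_episodios().
import re

def normalize_episode(label):
    # Pull out the first "number separator number" group, then rewrite it as NxM.
    found = re.search(r'\d+[^\d]+\d+', label)
    if not found:
        return label
    return re.sub(r"(\d+)[^\d]+(\d+)", r"\1x\2", found.group())

for sample in ['2×07 Sub-ITA', '1 - 05', '3x12']:
    print(normalize_episode(sample))  # -> 2x07, 1x05, 3x12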
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("kod.italiafilm findvideos")
|
||||
|
||||
if item.contentType == "movie":
|
||||
return findvid(item)
|
||||
|
||||
# Carica la pagina
|
||||
data = item.url
|
||||
|
||||
urls = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)".*?>') #
|
||||
for url in urls: # Fix
|
||||
page = httptools.downloadpage(url, headers=headers).data #
|
||||
data += '\n' + scrapertools.find_single_match(page,'<meta name="og:url" content="([^=]+)">') #
|
||||
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.title + videoitem.title
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.show = item.show
|
||||
videoitem.plot = item.plot
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos_single_ep(item):
|
||||
logger.info("[italiafilm.py] findvideos_single_ep")
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
data = scrapertools.find_single_match(data, item.extra)
|
||||
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
for videoitem in itemlist:
|
||||
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
|
||||
videoitem.title = "".join(["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), item.title])
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.show = item.show
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.channel = item.channel
|
||||
|
||||
return itemlist
|
||||
@@ -15,12 +15,11 @@
|
||||
"""
|
||||
|
||||
import re
|
||||
from core import support, httptools, scrapertoolsV2
|
||||
from core import support, httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import config
|
||||
|
||||
__channel__ = 'italiaserie'
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
list_servers = ['speedvideo']
|
||||
@@ -131,8 +130,8 @@ def findvideos(item):
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
data = re.sub('\n|\t', ' ', data)
|
||||
data = re.sub(r'>\s+<', '> <', data)
|
||||
url_video = scrapertoolsV2.find_single_match(data, r'<a rel="[^"]+" target="[^"]+" act="[^"]+"\s+href="([^"]+)" class="[^"]+-link".+?\d+.+?</strong> </a>', -1)
|
||||
url_serie = scrapertoolsV2.find_single_match(data, r'<link rel="canonical" href="([^"]+)" />')
|
||||
url_video = scrapertools.find_single_match(data, r'<a rel="[^"]+" target="[^"]+" act="[^"]+"\s+href="([^"]+)" class="[^"]+-link".+?\d+.+?</strong> </a>', -1)
|
||||
url_serie = scrapertools.find_single_match(data, r'<link rel="canonical" href="([^"]+)" />')
|
||||
goseries = support.typo("Vai alla Serie:", ' bold')
|
||||
series = support.typo(item.contentSerieName, ' bold color kod')
|
||||
itemlist = support.server(item, data=url_video)
|
||||
|
||||
@@ -5,8 +5,7 @@
|
||||
|
||||
from core import support
|
||||
|
||||
__channel__ = "mondoserietv"
|
||||
host = support.config.get_channel_url(__channel__)
|
||||
host = support.config.get_channel_url()
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
|
||||
@@ -7,8 +7,7 @@ from core import support
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
__channel__ = "netfreex"
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
headers = ""
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
|
||||
@@ -7,8 +7,7 @@
|
||||
from core import support
|
||||
from core.support import config, log
|
||||
|
||||
__channel__ = "piratestreaming"
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
|
||||
list_servers = ['mixdrop', 'speedvideo', 'gounlimited', 'onlystream', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
@@ -9,8 +9,7 @@ from core.item import Item
|
||||
from platformcode import config
|
||||
import json, datetime
|
||||
|
||||
__channel__ = "polpotv"
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
|
||||
headers = [['Accept', 'application/ld+json']]
|
||||
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
{
|
||||
"id": "programmazione",
|
||||
"name": "Programmazione",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://www.smartworld.it/wp-content/uploads/2015/02/codice-code-programmazione-fhd-720x480.png",
|
||||
"banner": "http://www.smartworld.it/wp-content/uploads/2015/02/codice-code-programmazione-fhd-720x480.png",
|
||||
"categories": ["documentary"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale Video Corsi Programmazione
|
||||
# Creato da iSOD
|
||||
# https://alfa-addon.com/categories/kod-addon.50/.
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
site = "https://www.youtube.com"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("kod.programmazione mainlist")
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Html 5[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL7A4A3449C649048F", thumbnail="http://i.ytimg.com/vi/TyCvfNt20nM/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Css[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLD74C5E763D39793D", thumbnail="http://i.ytimg.com/vi/hd8k82aG_O4/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Javascript[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL1A447BA7F7F9EB9E", thumbnail="http:////i.ytimg.com/vi/eXlzdxyThLM/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso PHP[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJc664i2Cv0X0ibM9b1YqRyd", thumbnail="http://i.ytimg.com/vi/0nA1gPWdBWw/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso PHP Mysql[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL101314D973955661", thumbnail="http://i.ytimg.com/vi/QIxmITjITY8/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Jquery[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLC959BB22285B353F", thumbnail="http://i.ytimg.com/vi/mxl2IcNdbrk/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java da Zero[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJe2dpE7di4aPJwrQuRD6IDD", thumbnail="http://i.ytimg.com/vi/7PGPLqFpDMc/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java 2 OOP[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJee1dk24wX-68yHTnMfzdX5", thumbnail="http://i.ytimg.com/vi/h6VoxIAUZoo/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java Interfaccia Grafica[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJfRML8EDs7v9nwjdOt6dvaf", thumbnail="http://i.ytimg.com/vi/fS7OxhbIlw4/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Java Android[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJeqmBWbE1Rbac2QWHoPCjR2", thumbnail="http://i.ytimg.com/vi/GINLfdq-elE/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Progettazione DB[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJcJPSV4sOfhLtPbtQ-yycFH", thumbnail="http://i.ytimg.com/vi/FnkL4YdWAwE/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso SQL[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLE555DB6188C967AC", thumbnail="http://i.ytimg.com/vi/jM55Fb9YTfE/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Python[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PLC64779F4E2E7EB10", thumbnail="http://i.ytimg.com/vi/_iX9CSX09Z8/mqdefault.jpg"))
|
||||
itemlist.append( Item(channel=item.channel, title="[COLOR azure]Corso Unit 3D[/COLOR]", action="corsi", url="https://www.youtube.com/playlist?list=PL0qAPtx8YtJcbl6ZHwtFIkFxWY-adCeS7", thumbnail="http://i.ytimg.com/vi/QiFBrHp3IGk/mqdefault.jpg"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def corsi(item):
|
||||
logger.info("kod.programmazione peliculas")
|
||||
itemlist = []
|
||||
|
||||
# scarrico il canale
|
||||
html = httptools.downloadpage(item.url).data
|
||||
|
||||
# Estraggo l'elenco dei video e titoli
|
||||
patron = '<a class="pl-video-title-link.*?href="(.*?)"[^>]+>(.*?)</a>'
|
||||
trovati = re.compile(patron, re.DOTALL).findall(html)
|
||||
scrapertools.printMatches(trovati)
|
||||
max = len(trovati)
|
||||
min = 0
|
||||
|
||||
# ciclo sull'elenco trovato
|
||||
for VideoUrl, VideoTitolo in trovati:
|
||||
# Decodifico Html
|
||||
titolo = scrapertools.decodeHtmlentities(VideoTitolo)
|
||||
# contatore
|
||||
min += 1
|
||||
# aggiungo alla lista
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
fulltitle=titolo,
|
||||
show=titolo,
|
||||
title="[COLOR azure]" + item.title + " - " + str(min) + "x" + str(max) + "[/COLOR]",
|
||||
url=site + VideoUrl,
|
||||
thumbnail=item.thumbnail,
|
||||
plot=titolo,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
File diff suppressed because it is too large
@@ -1,37 +0,0 @@
|
||||
{
|
||||
"id": "ricettevideo",
|
||||
"name": "Ricette Video",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://ricettevideo.net/wp-content/uploads/2013/08/Ricette-Video-Logo.png",
|
||||
"banner": "http://ricettevideo.net/wp-content/uploads/2013/08/Ricette-Video-Logo.png",
|
||||
"categories": ["documentary"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_documentales",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Documentari",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale ricettevideo
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = "http://ricettevideo.net"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("kod.ricettevideo mainlist")
|
||||
itemlist = [Item(channel=item.channel, title="[COLOR azure]Videoricette[/COLOR]", action="peliculas",
|
||||
url=host,
|
||||
thumbnail="http://www.brinkmanscountrycorner.com/images/Recipies.png")]
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info("kod.ricettevideo peliculas")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<div class="post-item-small">\s*<a href="([^"]+)"[^t]+title="Permanent Link: ([^"]+)"><img[^s]+src="([^"]+)"[^>]+>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedplot = ""
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
|
||||
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
# Paginazione
|
||||
patronvideos = '<link rel=\'next\' href=\'([^\']+)\' />'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
if len(matches) > 0:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=scrapedurl,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
# test update
|
||||
@@ -4,29 +4,19 @@
|
||||
# ------------------------------------------------------------
|
||||
|
||||
|
||||
from core import scrapertoolsV2, httptools, support
|
||||
from core import scrapertools, httptools, support
|
||||
from core.item import Item
|
||||
|
||||
__channel__ = 'seriehd'
|
||||
host = support.config.get_channel_url(__channel__)
|
||||
host = support.config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
# host = 'https://www.seriehd.watch'
|
||||
headers = ''
|
||||
|
||||
def findhost():
|
||||
pass
|
||||
# global host, headers
|
||||
# data= httptools.downloadpage('https://seriehd.nuovo.link/').data
|
||||
# global host, headers
|
||||
# host = scrapertoolsV2.find_single_match(data, r'<div class="elementor-button-wrapper"> <a href="([^"]+)"')
|
||||
# headers = [['Referer', host]]
|
||||
|
||||
list_servers = ['verystream', 'openload', 'streamango', 'thevideome']
|
||||
list_servers = ['mixdrop', 'vidoza', 'vcstream', 'gounlimited']
|
||||
list_quality = ['1080p', '720p', '480p', '360']
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
findhost()
|
||||
|
||||
tvshow = [('Genere', ['', 'genre']),
|
||||
('Americane', ['/serie-tv-streaming/serie-tv-americane', 'peliculas']),
|
||||
('Italiane', ['/serie-tv-streaming/serie-tv-italiane', 'peliculas']),]
|
||||
@@ -35,8 +25,6 @@ def mainlist(item):
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
#findhost()
|
||||
# debug=True
|
||||
patron = r'<h2>(?P<title>.*?)</h2>\s*<img src="(?P<thumb>[^"]+)" alt="[^"]*" />\s*<A HREF="(?P<url>[^"]+)">.*?<span class="year">(?:(?P<year>[0-9]{4}))?.*?<span class="calidad">(?:(?P<quality>[A-Z]+))?.*?</span>'
|
||||
patronNext=r'<span class="current">\d+</span><a rel="nofollow" class="page larger" href="([^"]+)">\d+</a>'
|
||||
action='episodios'
|
||||
@@ -45,7 +33,6 @@ def peliculas(item):
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
#findhost()
|
||||
data =''
|
||||
url = support.match(item, patronBlock=r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">')[1]
|
||||
seasons = support.match(item, r'<a href="([^"]+)">(\d+)<', r'<h3>STAGIONE</h3><ul>(.*?)</ul>', headers, url)[0]
|
||||
@@ -63,7 +50,6 @@ def episodios(item):
|
||||
|
||||
@support.scrape
|
||||
def genre(item):
|
||||
#findhost()
|
||||
patronMenu = '<a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
|
||||
blacklist = ['Serie TV','Serie TV Americane','Serie TV Italiane','altadefinizione']
|
||||
patronBlock = '<ul class="sub-menu">(?P<block>.*)</ul>'
|
||||
@@ -73,7 +59,7 @@ def genre(item):
|
||||
|
||||
def search(item, texto):
|
||||
support.log(texto)
|
||||
findhost()
|
||||
|
||||
|
||||
item.contentType = 'tvshow'
|
||||
item.url = host + "/?s=" + texto
|
||||
@@ -88,7 +74,7 @@ def search(item, texto):
|
||||
|
||||
def newest(categoria):
|
||||
support.log(categoria)
|
||||
findhost()
|
||||
|
||||
itemlist = []
|
||||
item = support.Item()
|
||||
try:
|
||||
|
||||
@@ -16,21 +16,18 @@
|
||||
- Prima fare la 'Rinumerazione' dal menu contestuale dal titolo della serie
|
||||
"""
|
||||
import re
|
||||
from core import support, httptools, scrapertoolsV2
|
||||
from core import support, httptools, scrapertools
|
||||
from platformcode import config
|
||||
from core.item import Item
|
||||
|
||||
__channel__ = "serietvonline"
|
||||
|
||||
host = "https://serietvonline.monster"
|
||||
headers = ""
|
||||
|
||||
def findhost():
|
||||
pass
|
||||
# global host, headers
|
||||
# data = httptools.downloadpage('https://serietvonline.me/').data
|
||||
# host = scrapertoolsV2.find_single_match(data, r'<a class="pure-button pure-button-primary" title=\'serie tv online\' href="([^"]+)">')
|
||||
# headers = [['Referer', host]]
|
||||
data = httptools.downloadpage('https://serietvonline.me/').data
|
||||
host = scrapertools.find_single_match(data, r'<a class="pure-button pure-button-primary" title=\'serie tv online\' href="([^"]+)">')
|
||||
return host
|
||||
|
||||
host = config.get_channel_url(findhost)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
list_servers = ['akvideo', 'wstream', 'backin', 'vidtome', 'nowvideo']
|
||||
list_quality = ['default']
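Several channels in this commit drop the hard-coded __channel__ argument and instead hand a findhost() resolver to config.get_channel_url(). The real get_channel_url() is provided by platformcode.config and is not part of this diff, so the following is only an illustrative sketch of the callback idea (per-channel override first, resolver as fallback), with invented helper names:

# Illustrative sketch only: how a get_channel_url()-style helper could combine a
# per-channel override with a findhost() resolver. resolve_channel_url(), _OVERRIDES
# and _CACHE are invented names, not the KoD implementation.
_OVERRIDES = {}   # e.g. filled from the addon's host-override settings
_CACHE = {}

def resolve_channel_url(channel, findhost=None, default=''):
    # 1) an explicit per-channel override always wins
    if _OVERRIDES.get(channel):
        return _OVERRIDES[channel]
    # 2) otherwise ask the channel's resolver once and cache the answer
    if channel not in _CACHE:
        _CACHE[channel] = findhost() if callable(findhost) else default
    return _CACHE[channel]

# Usage, mirroring the pattern above:
#   host = resolve_channel_url('serietvonline', findhost)
#   headers = [['Referer', host]]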
|
||||
@@ -39,7 +36,7 @@ list_quality = ['default']
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
support.log()
|
||||
findhost()
|
||||
|
||||
|
||||
film = ['/ultimi-film-aggiunti/',
|
||||
('Lista', ['/lista-film/', 'peliculas', 'lista'])
|
||||
@@ -55,7 +52,7 @@ def mainlist(item):
|
||||
|
||||
anime = ['/lista-cartoni-animati-e-anime/']
|
||||
|
||||
documentari = [('Documentari', ['/lista-documentari/' , 'peliculas' , 'doc', 'tvshow'])]
|
||||
documentari = [('Documentari bullet bold', ['/lista-documentari/' , 'peliculas' , 'doc', 'tvshow'])]
|
||||
|
||||
search = ''
|
||||
|
||||
@@ -129,7 +126,7 @@ def episodios(item):
|
||||
|
||||
def search(item, text):
|
||||
support.log("CERCA :" ,text, item)
|
||||
findhost()
|
||||
|
||||
item.url = "%s/?s=%s" % (host, text)
|
||||
|
||||
try:
|
||||
@@ -144,7 +141,7 @@ def search(item, text):
|
||||
|
||||
def newest(categoria):
|
||||
support.log(categoria)
|
||||
findhost()
|
||||
|
||||
itemlist = []
|
||||
item = Item()
|
||||
|
||||
@@ -183,8 +180,8 @@ def findvideos(item):
|
||||
data = re.sub('\n|\t', ' ', data)
|
||||
data = re.sub(r'>\s+<', '> <', data)
|
||||
#support.log("DATA - HTML:\n", data)
|
||||
url_video = scrapertoolsV2.find_single_match(data, r'<tr><td>(.+?)</td><tr>', -1)
|
||||
url_serie = scrapertoolsV2.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
|
||||
url_video = scrapertools.find_single_match(data, r'<tr><td>(.+?)</td><tr>', -1)
|
||||
url_serie = scrapertools.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
|
||||
goseries = support.typo("Vai alla Serie:", ' bold')
|
||||
series = support.typo(item.contentSerieName, ' bold color kod')
|
||||
itemlist = support.server(item, data=url_video)
|
||||
|
||||
@@ -12,8 +12,7 @@ from core.item import Item
|
||||
from core.support import log
|
||||
from platformcode import logger, config
|
||||
|
||||
__channel__ = "serietvsubita"
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
|
||||
@@ -8,13 +8,12 @@
|
||||
"""
|
||||
import re
|
||||
|
||||
from core import support, httptools, scrapertoolsV2
|
||||
from core import support, httptools, scrapertools
|
||||
from core.item import Item
|
||||
from core.support import log
|
||||
from platformcode import config
|
||||
|
||||
__channel__ = 'serietvu'
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
list_servers = ['speedvideo']
|
||||
@@ -119,8 +118,8 @@ def findvideos(item):
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
data = re.sub('\n|\t', ' ', data)
|
||||
data = re.sub(r'>\s+<', '> <', data)
|
||||
url_video = scrapertoolsV2.find_single_match(data, r'<div class="item"> <a data-id="[^"]+" data-href="([^"]+)" data-original="[^"]+"[^>]+> <div> <div class="title">Episodio \d+', -1)
|
||||
url_serie = scrapertoolsV2.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
|
||||
url_video = scrapertools.find_single_match(data, r'<div class="item"> <a data-id="[^"]+" data-href="([^"]+)" data-original="[^"]+"[^>]+> <div> <div class="title">Episodio \d+', -1)
|
||||
url_serie = scrapertools.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
|
||||
goseries = support.typo(">> Vai alla Serie:", ' bold')
|
||||
series = support.typo(item.contentSerieName, ' bold color kod')
|
||||
|
||||
|
||||
@@ -7,28 +7,26 @@ from core import support, httptools
|
||||
from core.item import Item
|
||||
from platformcode import config
|
||||
|
||||
# __channel__ = "streamingaltadefinizione"
|
||||
# host = config.get_channel_url(__channel__)
|
||||
|
||||
host = headers = ''
|
||||
list_servers = ['verystream', 'openload', 'wstream']
|
||||
list_quality = ['1080p', 'HD', 'DVDRIP', 'SD', 'CAM']
|
||||
|
||||
def findhost():
|
||||
global host, headers
|
||||
permUrl = httptools.downloadpage('https://www.popcornstream.info', follow_redirects=False).headers
|
||||
if 'google' in permUrl['location']:
|
||||
host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
|
||||
if host[:4] != 'http':
|
||||
host = 'https://'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
|
||||
else:
|
||||
host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
|
||||
else:
|
||||
host = permUrl['location']
|
||||
headers = [['Referer', host]]
|
||||
return host
|
||||
|
||||
host = config.get_channel_url(findhost)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
findhost()
|
||||
|
||||
film = ["/film/"]
|
||||
anime = ["/genere/anime/"]
|
||||
tvshow = ["/serietv/"]
|
||||
@@ -54,17 +52,17 @@ def generos(item):
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
findhost()
|
||||
|
||||
return support.dooplay_peliculas(item, True if "/genere/" in item.url else False)
|
||||
|
||||
|
||||
def episodios(item):
|
||||
findhost()
|
||||
|
||||
return support.dooplay_get_episodes(item)
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
findhost()
|
||||
|
||||
itemlist = []
|
||||
for link in support.dooplay_get_links(item, host):
|
||||
if link['title'] != 'Guarda il trailer':
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from core import support, httptools, scrapertoolsV2
|
||||
from core import support, httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
@@ -9,8 +9,7 @@ Nota per i tester: questo non è un canale 'tradizionale', essendo un canale tel
|
||||
la lista delle pagine non sarà affatto 'uniforme' (a seconda di come viene presentata la preview)
|
||||
"""
|
||||
|
||||
__channel__ = "streamtime"
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', 'org.telegram.messenger']]
|
||||
list_servers = ['directo']
|
||||
list_quality = ['default']
|
||||
@@ -114,7 +113,7 @@ def episodios(item):
|
||||
stagioni[st] = nEp
|
||||
|
||||
itemlist = []
|
||||
domain, id = scrapertoolsV2.find_single_match(url, r'(https?://[a-z0-9.-]+)/[^/]+/([^-/]+)')
|
||||
domain, id = scrapertools.find_single_match(url, r'(https?://[a-z0-9.-]+)/[^/]+/([^-/]+)')
|
||||
for st in sorted(stagioni.keys()):
|
||||
season = st[1:]
|
||||
episode = stagioni[st]
|
||||
@@ -141,7 +140,7 @@ def episodios(item):
|
||||
|
||||
def findvideos(item):
|
||||
# support.dbg()
|
||||
domain = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+')
|
||||
domain = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+')
|
||||
if item.contentType == 'movie':
|
||||
id = item.url.split('/')[-1]
|
||||
url = domain + '/play_f.php?f=' + id
|
||||
|
||||
@@ -5,25 +5,22 @@
|
||||
|
||||
import re
|
||||
|
||||
from core import scrapertoolsV2, httptools, tmdb, support
|
||||
from core import scrapertools, httptools, tmdb, support
|
||||
from core.item import Item
|
||||
from core.support import log
|
||||
from core.support import menu, log
|
||||
from platformcode import logger
|
||||
from specials import autorenumber
|
||||
from platformcode import config, unify
|
||||
from lib.unshortenit import unshorten_only
|
||||
|
||||
host = 'https://www.tantifilm.eu'
|
||||
headers = ''
|
||||
def findhost():
|
||||
pass
|
||||
# global host, headers
|
||||
# permUrl = httptools.downloadpage('https://www.tantifilm.info/', follow_redirects=False).data
|
||||
# host = scrapertoolsV2.find_single_match(permUrl, r'<h2 style="text-align: center;"><a href="([^"]+)">Il nuovo indirizzo di Tantifilm è:</a></h2>')
|
||||
# if host.endswith('/'):
|
||||
# host = host[:-1]
|
||||
# headers = [['Referer', host]]
|
||||
|
||||
def findhost():
|
||||
permUrl = httptools.downloadpage('https://www.tantifilm.info/', follow_redirects=False).data
|
||||
host = 'https://' + scrapertools.find_single_match(permUrl, r'Ora siamo ([A-Za-z0-9./]+)')
|
||||
return host
|
||||
|
||||
host = config.get_channel_url(findhost)
|
||||
headers = [['Referer', host]]
|
||||
list_servers = ['verystream', 'openload', 'streamango', 'vidlox', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
@@ -31,7 +28,7 @@ list_quality = ['default']
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
log()
|
||||
findhost()
|
||||
|
||||
|
||||
#top = [(support.typo('Novità Film/Serie/Anime/Altro', 'bold'),['/film/'])]
|
||||
top = [('Novità Film/Serie/Anime/Altro', ['/film/', 'peliculas', 'all'])]
|
||||
@@ -59,7 +56,7 @@ def mainlist(item):
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
log()
|
||||
findhost()
|
||||
|
||||
|
||||
if item.args == 'search':
|
||||
patron = r'<a href="(?P<url>[^"]+)" title="Permalink to\s(?P<title>[^"]+) \((?P<year>[^<]+)\).*?".*?<img[^s]+src="(?P<thumb>[^"]+)".*?<div class="calitate">\s*<p>(?P<quality>[^<]+)<\/p>'
|
||||
@@ -79,7 +76,7 @@ def peliculas(item):
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
log()
|
||||
findhost()
|
||||
|
||||
if not item.data:
|
||||
data_check = httptools.downloadpage(item.url, headers=headers).data
|
||||
data_check = re.sub('\n|\t', ' ', data_check)
|
||||
@@ -87,7 +84,7 @@ def episodios(item):
|
||||
else:
|
||||
data_check = item.data
|
||||
patron_check = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
|
||||
item.url = scrapertoolsV2.find_single_match(data_check, patron_check)
|
||||
item.url = scrapertools.find_single_match(data_check, patron_check)
|
||||
|
||||
patronBlock = r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>'
|
||||
patron = r'<a href="(?P<url>[^"]+)"\s*>\s*<i[^>]+><\/i>\s*(?P<episode>\d+)<\/a>'
|
||||
@@ -102,8 +99,8 @@ def episodios(item):
|
||||
season_data = httptools.downloadpage(item.url).data
|
||||
season_data = re.sub('\n|\t', ' ', season_data)
|
||||
season_data = re.sub(r'>\s+<', '> <', season_data)
|
||||
block = scrapertoolsV2.find_single_match(season_data, 'Episodi.*?<ul class="nav navbar-nav">(.*?)</ul>')
|
||||
episodes = scrapertoolsV2.find_multiple_matches(block, '<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>')
|
||||
block = scrapertools.find_single_match(season_data, 'Episodi.*?<ul class="nav navbar-nav">(.*?)</ul>')
|
||||
episodes = scrapertools.find_multiple_matches(block, '<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>')
|
||||
for url, episode in episodes:
|
||||
i = item.clone()
|
||||
i.action = 'findvideos'
|
||||
@@ -116,13 +113,9 @@ def episodios(item):
|
||||
#debug = True
|
||||
return locals()
|
||||
|
||||
def player_or_not(item):
|
||||
|
||||
return item
|
||||
|
||||
def category(item):
|
||||
log()
|
||||
findhost()
|
||||
|
||||
blacklist = ['Serie TV Altadefinizione', 'HD AltaDefinizione', 'Al Cinema', 'Serie TV', 'Miniserie', 'Programmi Tv', 'Live', 'Trailers', 'Serie TV Aggiornate', 'Aggiornamenti', 'Featured']
|
||||
itemlist = support.scrape(item, '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>', ['url', 'title'], headers, blacklist, patron_block='<ul class="table-list">(.*?)</ul>', action='peliculas')
|
||||
return support.thumb(itemlist)
|
||||
@@ -133,9 +126,9 @@ def anime(item):
|
||||
|
||||
seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
|
||||
for season in seasons:
|
||||
episodes = scrapertoolsV2.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)')
|
||||
episodes = scrapertools.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)')
|
||||
for url, title, urls, none in episodes:
|
||||
urls = scrapertoolsV2.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')
|
||||
urls = scrapertools.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')
|
||||
|
||||
for url2 in urls:
|
||||
url += url2 + '\n'
|
||||
@@ -160,7 +153,7 @@ def anime(item):
|
||||
|
||||
def search(item, texto):
|
||||
log(texto)
|
||||
findhost()
|
||||
|
||||
|
||||
item.url = host + "/?s=" + texto
|
||||
try:
|
||||
@@ -189,8 +182,8 @@ def search(item, texto):
|
||||
## for url, title, year, thumb, quality in matches:
|
||||
## infoLabels = {}
|
||||
## infoLabels['year'] = year
|
||||
## title = scrapertoolsV2.decodeHtmlentities(title)
|
||||
## quality = scrapertoolsV2.decodeHtmlentities(quality)
|
||||
## title = scrapertools.decodeHtmlentities(title)
|
||||
## quality = scrapertools.decodeHtmlentities(quality)
|
||||
## longtitle = title + support.typo(quality,'_ [] color kod')
|
||||
## itemlist.append(
|
||||
## Item(channel=item.channel,
|
||||
@@ -218,7 +211,7 @@ def newest(categoria):
|
||||
matches = support.match(item, r'mediaWrapAlt recomended_videos"[^>]+>\s*<a href="([^"]+)" title="([^"]+)" rel="bookmark">\s*<img[^s]+src="([^"]+)"[^>]+>')[0]
|
||||
|
||||
for url, title, thumb in matches:
|
||||
title = scrapertoolsV2.decodeHtmlentities(title).replace("Permalink to ", "").replace("streaming", "")
|
||||
title = scrapertools.decodeHtmlentities(title).replace("Permalink to ", "").replace("streaming", "")
|
||||
title = re.sub(r'\s\(\d+\)','',title)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
@@ -247,7 +240,7 @@ def findvideos(item):
|
||||
|
||||
data = re.sub('\n|\t', ' ', data)
|
||||
data = re.sub(r'>\s+<', '> <', data)
|
||||
check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
|
||||
check = scrapertools.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
|
||||
if 'sub' in check.lower():
|
||||
item.contentLanguage = 'Sub-ITA'
|
||||
support.log("CHECK : ", check)
|
||||
@@ -262,7 +255,7 @@ def findvideos(item):
|
||||
return episodios(item)
|
||||
|
||||
if 'protectlink' in data:
|
||||
urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
|
||||
urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
|
||||
support.log("SONO QUI: ", urls)
|
||||
for url in urls:
|
||||
url = url.decode('base64')
|
||||
@@ -270,11 +263,11 @@ def findvideos(item):
|
||||
url, c = unshorten_only(url)
|
||||
if 'nodmca' in url:
|
||||
page = httptools.downloadpage(url, headers=headers).data
|
||||
url = '\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">')
|
||||
if url:
|
||||
listurl.add(url)
|
||||
url = '\t' + scrapertools.find_single_match(page, '<meta name="og:url" content="([^=]+)">')
|
||||
if url:
|
||||
listurl.add(url)
|
||||
data += '\n'.join(listurl)
|
||||
return support.server(item, data) # , headers=headers)
|
||||
return support.server(item, data)#, headers=headers)
|
||||
# return itemlist
|
||||
|
||||
##def findvideos(item):
|
||||
@@ -284,7 +277,7 @@ def findvideos(item):
## data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url, headers=headers).data
##
## if 'protectlink' in data:
## urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
## urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
## for url in urls:
## url = url.decode('base64')
## data += '\t' + url

@@ -5,8 +5,7 @@

from core import support

__channel__ = "toonitalia"
host = support.config.get_channel_url(__channel__)
host = support.config.get_channel_url()

headers = [['Referer', host]]

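Several channel files in this commit switch from get_channel_url(__channel__) to a bare get_channel_url(). A minimal sketch of how such a no-argument lookup could work, assuming the helper in platformcode.config falls back to the caller's module name (the mapping and mechanism below are hypothetical, not the addon's actual implementation):

import inspect

CHANNEL_URLS = {'toonitalia': 'https://toonitalia.example'}  # hypothetical channel -> url map

def get_channel_url(channel=None):
    # Hypothetical fallback: when no name is passed, derive it from the
    # calling module (e.g. "channels.toonitalia" -> "toonitalia").
    if channel is None:
        caller = inspect.getmodule(inspect.stack()[1][0])
        channel = caller.__name__.split('.')[-1]
    return CHANNEL_URLS[channel]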
@@ -3,13 +3,12 @@
# Canale per vedohd
# ------------------------------------------------------------

from core import scrapertoolsV2, httptools, support
from core import scrapertools, httptools, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay

__channel__ = "vedohd"
host = config.get_channel_url(__channel__)
host = config.get_channel_url()
headers = ""

IDIOMAS = {'Italiano': 'IT'}
@@ -48,7 +47,7 @@ def findvideos(item):
for link in support.dooplay_get_links(item, host):
if link['title'] != 'Trailer':
logger.info(link['title'])
server, quality = scrapertoolsV2.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?')
server, quality = scrapertools.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?')
if quality:
title = server + " [COLOR blue][" + quality + "][/COLOR]"
else:

@@ -8,8 +8,8 @@ from core.item import Item
from specials import autorenumber
from lib.concurrent import futures

__channel__ = "vvvvid"
host = support.config.get_channel_url(__channel__)

host = support.config.get_channel_url()

# Creating persistent session
current_session = requests.Session()

@@ -1,112 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# --------------------------------------------------------------------------------
|
||||
# Cloudflare decoder
|
||||
# --------------------------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import time
|
||||
import urllib
|
||||
|
||||
import urlparse
|
||||
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
class Cloudflare:
|
||||
def __init__(self, response):
|
||||
self.timeout = 5
|
||||
self.domain = urlparse.urlparse(response["url"])[1]
|
||||
self.protocol = urlparse.urlparse(response["url"])[0]
|
||||
self.js_data = {}
|
||||
self.header_data = {}
|
||||
if not "var s,t,o,p,b,r,e,a,k,i,n,g,f" in response["data"] or "chk_jschl" in response["url"]:
|
||||
return
|
||||
try:
|
||||
self.js_data["data"] = response["data"]
|
||||
self.js_data["auth_url"] = \
|
||||
re.compile('<form id="challenge-form" action="([^"]+)" method="get">').findall(response["data"])[0]
|
||||
self.js_data["params"] = {}
|
||||
self.js_data["params"]["jschl_vc"] = \
|
||||
re.compile('<input type="hidden" name="jschl_vc" value="([^"]+)"/>').findall(response["data"])[0]
|
||||
self.js_data["params"]["pass"] = \
|
||||
re.compile('<input type="hidden" name="pass" value="([^"]+)"/>').findall(response["data"])[0]
|
||||
self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000
|
||||
self.js_data["params"]["s"] = \
|
||||
re.compile('<input type="hidden" name="s" value="([^"]+)"').findall(response["data"])[0]
|
||||
except:
|
||||
logger.debug("Metodo #1 (javascript): NO disponible")
|
||||
self.js_data = {}
|
||||
if "refresh" in response["headers"]:
|
||||
try:
|
||||
self.header_data["wait"] = int(response["headers"]["refresh"].split(";")[0])
|
||||
self.header_data["auth_url"] = response["headers"]["refresh"].split("=")[1].split("?")[0]
|
||||
self.header_data["params"] = {}
|
||||
self.header_data["params"]["pass"] = response["headers"]["refresh"].split("=")[2]
|
||||
except:
|
||||
logger.debug("Metodo #2 (headers): NO disponible")
|
||||
self.header_data = {}
|
||||
|
||||
def solve_cf(self, body, domain):
|
||||
js = re.search(
|
||||
r"setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n",
|
||||
body
|
||||
).group(1)
|
||||
|
||||
js = re.sub(r"a\.value = ((.+).toFixed\(10\))?", r"\1", js)
|
||||
js = re.sub(r'(e\s=\sfunction\(s\)\s{.*?};)', '', js, flags=re.DOTALL|re.MULTILINE)
|
||||
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js).replace("t.length", str(len(domain)))
|
||||
js = js.replace('; 121', '')
|
||||
js = re.sub(r"[\n\\']", "", js)
|
||||
jsEnv = """
|
||||
var t = "{domain}";
|
||||
var g = String.fromCharCode;
|
||||
o = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
e = function(s) {{
|
||||
s += "==".slice(2 - (s.length & 3));
|
||||
var bm, r = "", r1, r2, i = 0;
|
||||
for (; i < s.length;) {{
|
||||
bm = o.indexOf(s.charAt(i++)) << 18 | o.indexOf(s.charAt(i++)) << 12 | (r1 = o.indexOf(s.charAt(i++))) << 6 | (r2 = o.indexOf(s.charAt(i++)));
|
||||
r += r1 === 64 ? g(bm >> 16 & 255) : r2 === 64 ? g(bm >> 16 & 255, bm >> 8 & 255) : g(bm >> 16 & 255, bm >> 8 & 255, bm & 255);
|
||||
}}
|
||||
return r;
|
||||
}};
|
||||
function italics (str) {{ return '<i>' + this + '</i>'; }};
|
||||
var document = {{
|
||||
getElementById: function () {{
|
||||
return {{'innerHTML': '{innerHTML}'}};
|
||||
}}
|
||||
}};
|
||||
{js}
|
||||
"""
|
||||
innerHTML = re.search('<div(?: [^<>]*)? id="([^<>]*?)">([^<>]*?)<\/div>', body , re.MULTILINE | re.DOTALL)
|
||||
innerHTML = innerHTML.group(2).replace("'", r"\'") if innerHTML else ""
|
||||
import js2py
|
||||
from jsc import jsunc
|
||||
js = jsunc(jsEnv.format(domain=domain, innerHTML=innerHTML, js=js))
|
||||
def atob(s):
|
||||
return base64.b64decode('{}'.format(s)).decode('utf-8')
|
||||
js2py.disable_pyimport()
|
||||
context = js2py.EvalJs({'atob': atob})
|
||||
result = context.eval(js)
|
||||
return float(result)
|
||||
|
||||
|
||||
@property
|
||||
def wait_time(self):
|
||||
if self.js_data.get("wait", 0):
|
||||
return self.js_data["wait"]
|
||||
else:
|
||||
return self.header_data.get("wait", 0)
|
||||
|
||||
@property
|
||||
def is_cloudflare(self):
|
||||
return self.header_data.get("wait", 0) > 0 or self.js_data.get("wait", 0) > 0
|
||||
|
||||
def get_url(self):
|
||||
# Metodo #1 (javascript)
|
||||
if self.js_data.get("wait", 0):
|
||||
self.js_data["params"]["jschl_answer"] = self.solve_cf(self.js_data["data"], self.domain)
|
||||
response = "%s://%s%s?%s" % (
|
||||
self.protocol, self.domain, self.js_data["auth_url"], urllib.urlencode(self.js_data["params"]))
|
||||
time.sleep(self.js_data["wait"])
|
||||
return response
|
||||
@@ -17,7 +17,7 @@ from threading import Lock
from core.jsontools import to_utf8
from platformcode import config, logger
from platformcode.logger import WebErrorException
from core import scrapertoolsV2
from core import scrapertools

# Get the addon version
__version = config.get_addon_version()
@@ -48,7 +48,7 @@ def get_user_agent():

def get_url_headers(url, forced=False):
domain = urlparse.urlparse(url)[1]
sub_dom = scrapertoolsV2.find_single_match(domain, r'\.(.*?\.\w+)')
sub_dom = scrapertools.find_single_match(domain, r'\.(.*?\.\w+)')
if sub_dom and not 'google' in url:
domain = sub_dom
domain_cookies = cj._cookies.get("." + domain, {}).get("/", {})
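get_url_headers reduces the full host to its parent domain before reading cookies from the jar. A quick illustration of what the r'\.(.*?\.\w+)' pattern extracts (the example host is made up):

import re

domain = 'www.example-channel.org'                      # illustrative host only
sub_dom = re.findall(r'\.(.*?\.\w+)', domain, re.DOTALL)[0]
print(sub_dom)  # example-channel.org -> cookies are then looked up under ".example-channel.org"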
@@ -144,34 +144,6 @@ def random_useragent():

return default_headers["User-Agent"]

def channel_proxy_list(url, forced_proxy=None):
import base64
import ast

try:
proxy_channel_bloqued_str = base64.b64decode(config.get_setting
('proxy_channel_bloqued')).decode('utf-8')
proxy_channel_bloqued = dict()
proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
except:
logger.debug('Proxytools not initialized correctly')
return False

if not url.endswith('/'):
url += '/'
if scrapertoolsV2.find_single_match(url, r'(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)') \
in proxy_channel_bloqued:
if forced_proxy and forced_proxy not in ['Total', 'ProxyDirect', 'ProxyCF', 'ProxyWeb']:
if forced_proxy in proxy_channel_bloqued[scrapertoolsV2.find_single_match(url, r'(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)')]:
return True
else:
return False
if forced_proxy:
return True
if not 'OFF' in proxy_channel_bloqued[scrapertoolsV2.find_single_match(url, r'(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)')]:
return True

return False

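The removed channel_proxy_list read the per-channel proxy map from a base64-encoded setting and parsed it with ast.literal_eval. A small sketch of that round trip, using a made-up setting value:

import ast, base64

# Hypothetical stored value: a Python dict literal, base64-encoded in the addon settings
stored = base64.b64encode(b"{'example-channel.org': ['ProxyDirect'], 'another.example': ['OFF']}")
blocked = ast.literal_eval(base64.b64decode(stored).decode('utf-8'))
print(blocked['example-channel.org'])  # ['ProxyDirect']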
def show_infobox(info_dict):
logger.info()
@@ -232,137 +204,6 @@ def show_infobox(info_dict):
|
||||
logger.info('%s%s%s' % (box['r_dn_corner'], box['fill'] * width, box['l_dn_corner']))
|
||||
return
|
||||
|
||||
def check_proxy(url, **opt):
|
||||
proxy_data = dict()
|
||||
proxy_data['dict'] = {}
|
||||
proxy = opt.get('proxy', True)
|
||||
proxy_web = opt.get('proxy_web', False)
|
||||
proxy_addr_forced = opt.get('proxy_addr_forced', None)
|
||||
forced_proxy = opt.get('forced_proxy', None)
|
||||
|
||||
try:
|
||||
if (proxy or proxy_web) and (forced_proxy or proxy_addr_forced or
|
||||
channel_proxy_list(url, forced_proxy=forced_proxy)):
|
||||
import proxytools
|
||||
proxy_data['addr'], proxy_data['CF_addr'], proxy_data['web_name'], \
|
||||
proxy_data['log'] = proxytools.get_proxy_addr(url, post=opt.get('post', None), forced_proxy=forced_proxy)
|
||||
|
||||
if proxy_addr_forced and proxy_data['log']:
|
||||
proxy_data['log'] = scrapertoolsV2.find_single_match(str(proxy_addr_forced), r"{'http.*':\s*'(.*?)'}")
|
||||
|
||||
if proxy and proxy_data['addr']:
|
||||
if proxy_addr_forced: proxy_data['addr'] = proxy_addr_forced
|
||||
proxy_data['dict'] = proxy_data['addr']
|
||||
proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
|
||||
elif proxy and proxy_data['CF_addr']:
|
||||
if proxy_addr_forced: proxy_data['CF_addr'] = proxy_addr_forced
|
||||
proxy_data['dict'] = proxy_data['CF_addr']
|
||||
proxy_data['stat'] = ', Proxy CF ' + proxy_data['log']
|
||||
elif proxy and proxy_addr_forced:
|
||||
proxy_data['addr'] = proxy_addr_forced
|
||||
proxy_data['dict'] = proxy_data['addr']
|
||||
proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
|
||||
elif proxy and not proxy_data['addr'] and not proxy_data['CF_addr'] \
|
||||
and not proxy_addr_forced:
|
||||
proxy = False
|
||||
if not proxy_data['web_name']:
|
||||
proxy_data['addr'], proxy_data['CF_addr'], proxy_data['web_name'], \
|
||||
proxy_data['log'] = proxytools.get_proxy_addr(url, forced_proxy='Total')
|
||||
if proxy_data['web_name']:
|
||||
proxy_web = True
|
||||
else:
|
||||
proxy_web = False
|
||||
if proxy_data['addr']:
|
||||
proxy = True
|
||||
proxy_data['dict'] = proxy_data['addr']
|
||||
proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
|
||||
|
||||
if proxy_web and proxy_data['web_name']:
|
||||
if opt.get('post', None): proxy_data['log'] = '(POST) ' + proxy_data['log']
|
||||
url, opt['post'], headers_proxy, proxy_data['web_name'] = \
|
||||
proxytools.set_proxy_web(url, proxy_data['web_name'], post=opt.get('post', None))
|
||||
if proxy_data['web_name']:
|
||||
proxy_data['stat'] = ', Proxy Web ' + proxy_data['log']
|
||||
if headers_proxy:
|
||||
request_headers.update(dict(headers_proxy))
|
||||
if proxy_web and not proxy_data['web_name']:
|
||||
proxy_web = False
|
||||
proxy_data['addr'], proxy_data['CF_addr'], proxy_data['web_name'], \
|
||||
proxy_data['log'] = proxytools.get_proxy_addr(url, forced_proxy='Total')
|
||||
if proxy_data['CF_addr']:
|
||||
proxy = True
|
||||
proxy_data['dict'] = proxy_data['CF_addr']
|
||||
proxy_data['stat'] = ', Proxy CF ' + proxy_data['log']
|
||||
elif proxy_data['addr']:
|
||||
proxy = True
|
||||
proxy_data['dict'] = proxy_data['addr']
|
||||
proxy_data['stat'] = ', Proxy Direct ' + proxy_data['log']
|
||||
|
||||
except:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
opt['proxy'] = ''
|
||||
opt['proxy_web'] = ''
|
||||
proxy_data['stat'] = ''
|
||||
proxy_data['addr'] = ''
|
||||
proxy_data['CF_addr'] = ''
|
||||
proxy_data['dict'] = {}
|
||||
proxy_data['web_name'] = ''
|
||||
proxy_data['log'] = ''
|
||||
url = opt['url_save']
|
||||
try:
|
||||
proxy_data['addr']['https'] = str('https://'+ proxy_data['addr']['https'])
|
||||
except:
|
||||
pass
|
||||
return url, proxy_data, opt
|
||||
|
||||
|
||||
def proxy_post_processing(url, proxy_data, response, opt):
|
||||
opt['out_break'] = False
|
||||
try:
|
||||
if ', Proxy Web' in proxy_data.get('stat', ''):
|
||||
import proxytools
|
||||
response["data"] = proxytools.restore_after_proxy_web(response["data"],
|
||||
proxy_data['web_name'], opt['url_save'])
|
||||
if response["data"] == 'ERROR':
|
||||
response['sucess'] = False
|
||||
if response["code"] == 302:
|
||||
proxy_data['stat'] = ', Proxy Direct'
|
||||
opt['forced_proxy'] = 'ProxyDirect'
|
||||
url = opt['url_save']
|
||||
opt['post'] = opt['post_save']
|
||||
response['sucess'] = False
|
||||
|
||||
if proxy_data.get('stat', '') and response['sucess'] == False and \
|
||||
opt.get('proxy_retries_counter', 0) <= opt.get('proxy_retries', 1) and opt.get('count_retries_tot', 5) > 1:
|
||||
import proxytools
|
||||
if ', Proxy Direct' in proxy_data.get('stat', ''):
|
||||
proxytools.get_proxy_list_method(proxy_init='ProxyDirect',
|
||||
error_skip=proxy_data['addr'], url_test=url)
|
||||
elif ', Proxy CF' in proxy_data.get('stat', ''):
|
||||
proxytools.get_proxy_list_method(proxy_init='ProxyCF',
|
||||
error_skip=proxy_data['CF_addr'])
|
||||
url = opt['url_save']
|
||||
elif ', Proxy Web' in proxy_data.get('stat', ''):
|
||||
if channel_proxy_list(opt['url_save'], forced_proxy=proxy_data['web_name']):
|
||||
opt['forced_proxy'] = 'ProxyCF'
|
||||
url =opt['url_save']
|
||||
opt['post'] = opt['post_save']
|
||||
else:
|
||||
proxytools.get_proxy_list_method(proxy_init='ProxyWeb',
|
||||
error_skip=proxy_data['web_name'])
|
||||
url =opt['url_save']
|
||||
opt['post'] = opt['post_save']
|
||||
|
||||
else:
|
||||
opt['out_break'] = True
|
||||
except:
|
||||
import traceback
|
||||
logger.error(traceback.format_exc())
|
||||
opt['out_break'] = True
|
||||
|
||||
return response["data"], response['sucess'], url, opt
def downloadpage(url, **opt):
@@ -410,29 +251,21 @@ def downloadpage(url, **opt):

"""
load_cookies()

# if scrapertoolsV2.get_domain_from_url(url) in ['www.seriehd.moda', 'wstream.video', 'www.guardaserie.media', 'akvideo.stream','www.piratestreaming.top']: # cloudflare urls
# if opt.get('session', False):
# session = opt['session'] # same session to speed up search
# else:
# from lib import cloudscraper
# session = cloudscraper.create_scraper()
# else:
# from lib import requests
# session = requests.session()

if opt.get('session', False):
session = opt['session'] # same session to speed up search
logger.info('same session')
elif opt.get('use_requests', False):
from lib import requests
session = requests.session()
else:
if urlparse.urlparse(url).netloc in ['www.guardaserie.media', 'casacinema.space']:
from lib import cloudscraper
session = cloudscraper.create_scraper()
elif opt.get('session', False):
session = opt['session'] # same session to speed up search
logger.info('same session')
elif config.get_setting('resolver_dns') and not opt.get('use_requests', False):
from specials import resolverdns
session = resolverdns.session()
else:
from lib import requests
session = requests.session()

# Headers by default, if nothing is specified
|
||||
req_headers = default_headers.copy()
|
||||
verify = opt.get('verify', True)
|
||||
|
||||
# Headers passed as parameters
|
||||
if opt.get('headers', None) is not None:
|
||||
@@ -445,148 +278,132 @@ def downloadpage(url, **opt):
|
||||
req_headers['User-Agent'] = random_useragent()
|
||||
url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]")
|
||||
|
||||
opt['proxy_retries_counter'] = 0
|
||||
opt['url_save'] = url
|
||||
opt['post_save'] = opt.get('post', None)
|
||||
|
||||
while opt['proxy_retries_counter'] <= opt.get('proxy_retries', 1):
|
||||
response = {}
|
||||
info_dict = []
|
||||
payload = dict()
|
||||
files = {}
|
||||
file_name = ''
|
||||
opt['proxy_retries_counter'] += 1
|
||||
response = {}
|
||||
info_dict = []
|
||||
payload = dict()
|
||||
files = {}
|
||||
file_name = ''
|
||||
|
||||
session.verify = opt.get('verify', True)
|
||||
session.verify = opt.get('verify', verify)
|
||||
|
||||
if opt.get('cookies', True):
|
||||
session.cookies = cj
|
||||
session.headers.update(req_headers)
|
||||
if opt.get('cookies', True):
|
||||
session.cookies = cj
|
||||
session.headers.update(req_headers)
|
||||
|
||||
# Prepare the url in case you need a proxy, or if proxies are sent from the channel
|
||||
# url, proxy_data, opt = check_proxy(url, **opt)
|
||||
# if opt.get('proxies', None) is not None:
|
||||
# session.proxies = opt['proxies']
|
||||
# elif proxy_data.get('dict', {}):
|
||||
# session.proxies = proxy_data['dict']
|
||||
proxy_data = {'dict': {}}
|
||||
proxy_data = {'dict': {}}
|
||||
|
||||
inicio = time.time()
|
||||
inicio = time.time()
|
||||
|
||||
if opt.get('timeout', None) is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None:
|
||||
opt['timeout'] = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
|
||||
if opt['timeout'] == 0: opt['timeout'] = None
|
||||
if opt.get('timeout', None) is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None:
|
||||
opt['timeout'] = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
|
||||
if opt['timeout'] == 0: opt['timeout'] = None
|
||||
|
||||
if len(url) > 0:
|
||||
try:
|
||||
if opt.get('post', None) is not None or opt.get('file', None) is not None:
|
||||
if opt.get('post', None) is not None:
|
||||
# Convert string post in dict
|
||||
try:
|
||||
json.loads(opt['post'])
|
||||
payload = opt['post']
|
||||
except:
|
||||
if not isinstance(opt['post'], dict):
|
||||
post = urlparse.parse_qs(opt['post'], keep_blank_values=1)
|
||||
payload = dict()
|
||||
if len(url) > 0:
|
||||
try:
|
||||
if opt.get('post', None) is not None or opt.get('file', None) is not None:
|
||||
if opt.get('post', None) is not None:
|
||||
# Convert string post in dict
|
||||
try:
|
||||
json.loads(opt['post'])
|
||||
payload = opt['post']
|
||||
except:
|
||||
if not isinstance(opt['post'], dict):
|
||||
post = urlparse.parse_qs(opt['post'], keep_blank_values=1)
|
||||
payload = dict()
|
||||
|
||||
for key, value in post.items():
|
||||
try:
|
||||
payload[key] = value[0]
|
||||
except:
|
||||
payload[key] = ''
|
||||
else:
|
||||
payload = opt['post']
|
||||
|
||||
# Verify 'file' and 'file_name' options to upload a buffer or file
|
||||
if opt.get('file', None) is not None:
|
||||
if os.path.isfile(opt['file']):
|
||||
if opt.get('file_name', None) is None:
|
||||
path_file, opt['file_name'] = os.path.split(opt['file'])
|
||||
files = {'file': (opt['file_name'], open(opt['file'], 'rb'))}
|
||||
file_name = opt['file']
|
||||
for key, value in post.items():
|
||||
try:
|
||||
payload[key] = value[0]
|
||||
except:
|
||||
payload[key] = ''
|
||||
else:
|
||||
files = {'file': (opt.get('file_name', 'Default'), opt['file'])}
|
||||
file_name = opt.get('file_name', 'Default') + ', Buffer de memoria'
|
||||
payload = opt['post']
|
||||
|
||||
info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
|
||||
if opt.get('only_headers', False):
|
||||
# Makes the request with HEAD method
|
||||
req = session.head(url, allow_redirects=opt.get('follow_redirects', True),
|
||||
timeout=opt['timeout'])
|
||||
# Verify 'file' and 'file_name' options to upload a buffer or file
|
||||
if opt.get('file', None) is not None:
|
||||
if os.path.isfile(opt['file']):
|
||||
if opt.get('file_name', None) is None:
|
||||
path_file, opt['file_name'] = os.path.split(opt['file'])
|
||||
files = {'file': (opt['file_name'], open(opt['file'], 'rb'))}
|
||||
file_name = opt['file']
|
||||
else:
|
||||
# Makes the request with POST method
|
||||
req = session.post(url, data=payload, allow_redirects=opt.get('follow_redirects', True),
|
||||
files=files, timeout=opt['timeout'])
|
||||
files = {'file': (opt.get('file_name', 'Default'), opt['file'])}
|
||||
file_name = opt.get('file_name', 'Default') + ', Buffer de memoria'
|
||||
|
||||
elif opt.get('only_headers', False):
|
||||
info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
|
||||
info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
|
||||
if opt.get('only_headers', False):
|
||||
# Makes the request with HEAD method
|
||||
req = session.head(url, allow_redirects=opt.get('follow_redirects', True),
|
||||
timeout=opt['timeout'])
|
||||
else:
|
||||
info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
|
||||
# Makes the request with GET method
|
||||
req = session.get(url, allow_redirects=opt.get('follow_redirects', True),
|
||||
timeout=opt['timeout'])
|
||||
# Makes the request with POST method
|
||||
req = session.post(url, data=payload, allow_redirects=opt.get('follow_redirects', True),
|
||||
files=files, timeout=opt['timeout'])
|
||||
|
||||
except Exception as e:
|
||||
from lib import requests
|
||||
if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
|
||||
req = requests.Response()
|
||||
response['data'] = ''
|
||||
response['sucess'] = False
|
||||
info_dict.append(('Success', 'False'))
|
||||
response['code'] = str(e)
|
||||
info_dict.append(('Response code', str(e)))
|
||||
info_dict.append(('Finalizado en', time.time() - inicio))
|
||||
if not opt.get('alfa_s', False):
|
||||
show_infobox(info_dict)
|
||||
return type('HTTPResponse', (), response)
|
||||
else:
|
||||
req = requests.Response()
|
||||
req.status_code = str(e)
|
||||
elif opt.get('only_headers', False):
|
||||
info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
|
||||
# Makes the request with HEAD method
|
||||
req = session.head(url, allow_redirects=opt.get('follow_redirects', True),
|
||||
timeout=opt['timeout'])
|
||||
else:
|
||||
info_dict = fill_fields_pre(url, opt, proxy_data, file_name)
|
||||
# Makes the request with GET method
|
||||
req = session.get(url, allow_redirects=opt.get('follow_redirects', True),
|
||||
timeout=opt['timeout'])
|
||||
except Exception as e:
|
||||
from lib import requests
|
||||
if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
|
||||
response['data'] = ''
|
||||
response['sucess'] = False
|
||||
info_dict.append(('Success', 'False'))
|
||||
response['code'] = str(e)
|
||||
info_dict.append(('Response code', str(e)))
|
||||
info_dict.append(('Finalizado en', time.time() - inicio))
|
||||
if not opt.get('alfa_s', False):
|
||||
show_infobox(info_dict)
|
||||
return type('HTTPResponse', (), response)
|
||||
else:
|
||||
req = requests.Response()
|
||||
req.status_code = str(e)
|
||||
|
||||
else:
|
||||
response['data'] = ''
|
||||
response['sucess'] = False
|
||||
response['code'] = ''
|
||||
return type('HTTPResponse', (), response)
|
||||
else:
|
||||
response['data'] = ''
|
||||
response['sucess'] = False
|
||||
response['code'] = ''
|
||||
return type('HTTPResponse', (), response)
|
||||
|
||||
response_code = req.status_code
|
||||
response_code = req.status_code
|
||||
|
||||
response['data'] = req.content
|
||||
response['url'] = req.url
|
||||
if not response['data']:
|
||||
response['data'] = ''
|
||||
try:
|
||||
response['json'] = to_utf8(req.json())
|
||||
except:
|
||||
response['json'] = dict()
|
||||
response['code'] = response_code
|
||||
response['headers'] = req.headers
|
||||
response['cookies'] = req.cookies
|
||||
response['data'] = req.content
|
||||
response['url'] = req.url
|
||||
if not response['data']:
|
||||
response['data'] = ''
|
||||
try:
|
||||
response['json'] = to_utf8(req.json())
|
||||
except:
|
||||
response['json'] = dict()
|
||||
response['code'] = response_code
|
||||
response['headers'] = req.headers
|
||||
response['cookies'] = req.cookies
|
||||
|
||||
info_dict, response = fill_fields_post(info_dict, req, response, req_headers, inicio)
|
||||
info_dict, response = fill_fields_post(info_dict, req, response, req_headers, inicio)
|
||||
|
||||
if opt.get('cookies', True):
|
||||
save_cookies(alfa_s=opt.get('alfa_s', False))
|
||||
if opt.get('cookies', True):
|
||||
save_cookies(alfa_s=opt.get('alfa_s', False))
|
||||
|
||||
# is_channel = inspect.getmodule(inspect.currentframe().f_back)
|
||||
# is_channel = scrapertoolsV2.find_single_match(str(is_channel), "<module '(channels).*?'")
|
||||
# if is_channel and isinstance(response_code, int):
|
||||
# if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
|
||||
# if response_code > 399:
|
||||
# show_infobox(info_dict)
|
||||
# raise WebErrorException(urlparse.urlparse(url)[1])
|
||||
# is_channel = inspect.getmodule(inspect.currentframe().f_back)
|
||||
# is_channel = scrapertools.find_single_match(str(is_channel), "<module '(channels).*?'")
|
||||
# if is_channel and isinstance(response_code, int):
|
||||
# if not opt.get('ignore_response_code', False) and not proxy_data.get('stat', ''):
|
||||
# if response_code > 399:
|
||||
# show_infobox(info_dict)
|
||||
# raise WebErrorException(urlparse.urlparse(url)[1])
|
||||
|
||||
if not 'api.themoviedb' in url and not opt.get('alfa_s', False):
|
||||
show_infobox(info_dict)
|
||||
|
||||
# If there is a proxy error, refresh the list and retry the number indicated in proxy_retries
|
||||
# response['data'], response['sucess'], url, opt = proxy_post_processing(url, proxy_data, response, opt)
|
||||
# if opt.get('out_break', False):
|
||||
# break
|
||||
if not 'api.themoviedb' in url and not opt.get('alfa_s', False):
|
||||
show_infobox(info_dict)
|
||||
|
||||
return type('HTTPResponse', (), response)
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,27 +1,17 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# --------------------------------------------------------------------------------
|
||||
# Scraper tools for reading and processing web elements
|
||||
# Scraper tools v2 for reading and processing web elements
|
||||
# --------------------------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from core import httptools
|
||||
import urlparse
|
||||
|
||||
from core.entities import html5
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def get_header_from_response(url, header_to_get="", post=None, headers=None):
|
||||
header_to_get = header_to_get.lower()
|
||||
response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True)
|
||||
return response.headers.get(header_to_get)
|
||||
|
||||
|
||||
def read_body_and_headers(url, post=None, headers=None, follow_redirects=False, timeout=None):
|
||||
response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
|
||||
timeout=timeout)
|
||||
return response.data, response.headers
|
||||
|
||||
|
||||
def printMatches(matches):
|
||||
i = 0
|
||||
for match in matches:
|
||||
@@ -42,8 +32,37 @@ def find_multiple_matches(text, pattern):
return re.findall(pattern, text, re.DOTALL)


def entityunescape(cadena):
return unescape(cadena)
def find_multiple_matches_groups(text, pattern):
r = re.compile(pattern)
return [m.groupdict() for m in r.finditer(text)]


# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8
def decodeHtmlentities(data):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")

def substitute_entity(match):
ent = match.group(2) + match.group(3)
res = ""
while not ent in html5 and not ent.endswith(";") and match.group(1) != "#":
# Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos
try:
res = ent[-1] + res
ent = ent[:-1]
except:
break

if match.group(1) == "#":
ent = unichr(int(ent.replace(";", "")))
return ent.encode('utf-8')
else:
cp = html5.get(ent)
if cp:
return cp.decode("unicode-escape").encode('utf-8') + res
else:
return match.group()

return entity_re.subn(substitute_entity, data)[0]

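decodeHtmlentities above converts numeric and named HTML entities back into UTF-8 text. A tiny illustrative call (Python 2 byte strings, as in the rest of the module; which named entities resolve depends on core.entities.html5):

# Illustrative only - assumes decodeHtmlentities from this module is in scope
print(decodeHtmlentities("Citt&agrave; di carta &#8211; streaming"))
# -> "Città di carta – streaming"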
def unescape(text):
|
||||
@@ -84,47 +103,6 @@ def unescape(text):
|
||||
# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8
|
||||
|
||||
|
||||
def decodeHtmlentities(string):
|
||||
string = entitiesfix(string)
|
||||
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});")
|
||||
|
||||
def substitute_entity(match):
|
||||
from htmlentitydefs import name2codepoint as n2cp
|
||||
ent = match.group(2)
|
||||
if match.group(1) == "#":
|
||||
return unichr(int(ent)).encode('utf-8')
|
||||
else:
|
||||
cp = n2cp.get(ent)
|
||||
|
||||
if cp:
|
||||
return unichr(cp).encode('utf-8')
|
||||
else:
|
||||
return match.group()
|
||||
|
||||
return entity_re.subn(substitute_entity, string)[0]
|
||||
|
||||
|
||||
def entitiesfix(string):
|
||||
# Las entidades comienzan siempre con el símbolo & , y terminan con un punto y coma ( ; ).
|
||||
string = string.replace("á", "á")
|
||||
string = string.replace("é", "é")
|
||||
string = string.replace("í", "í")
|
||||
string = string.replace("ó", "ó")
|
||||
string = string.replace("ú", "ú")
|
||||
string = string.replace("Á", "Á")
|
||||
string = string.replace("É", "É")
|
||||
string = string.replace("Í", "Í")
|
||||
string = string.replace("Ó", "Ó")
|
||||
string = string.replace("Ú", "Ú")
|
||||
string = string.replace("ü", "ü")
|
||||
string = string.replace("Ü", "Ü")
|
||||
string = string.replace("ñ", "ñ")
|
||||
string = string.replace("¿", "¿")
|
||||
string = string.replace("¡", "¡")
|
||||
string = string.replace(";;", ";")
|
||||
return string
|
||||
|
||||
|
||||
def htmlclean(cadena):
|
||||
cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
|
||||
|
||||
@@ -226,7 +204,7 @@ def htmlclean(cadena):
|
||||
cadena = re.compile("<link[^>]*>", re.DOTALL).sub("", cadena)
|
||||
|
||||
cadena = cadena.replace("\t", "")
|
||||
cadena = entityunescape(cadena)
|
||||
# cadena = entityunescape(cadena)
|
||||
return cadena
|
||||
|
||||
|
||||
@@ -314,8 +292,8 @@ def remove_show_from_title(title, show):
|
||||
return title
|
||||
|
||||
|
||||
# scrapertools.get_filename_from_url(media_url)[-4:]
|
||||
def get_filename_from_url(url):
|
||||
import urlparse
|
||||
parsed_url = urlparse.urlparse(url)
|
||||
try:
|
||||
filename = parsed_url.path
|
||||
@@ -332,19 +310,18 @@ def get_filename_from_url(url):
|
||||
return filename
|
||||
|
||||
|
||||
# def get_domain_from_url(url):
|
||||
# import urlparse
|
||||
# parsed_url = urlparse.urlparse(url)
|
||||
# try:
|
||||
# filename = parsed_url.netloc
|
||||
# except:
|
||||
# # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
|
||||
# if len(parsed_url) >= 4:
|
||||
# filename = parsed_url[1]
|
||||
# else:
|
||||
# filename = ""
|
||||
#
|
||||
# return filename
|
||||
def get_domain_from_url(url):
|
||||
parsed_url = urlparse.urlparse(url)
|
||||
try:
|
||||
filename = parsed_url.netloc
|
||||
except:
|
||||
# Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
|
||||
if len(parsed_url) >= 4:
|
||||
filename = parsed_url[1]
|
||||
else:
|
||||
filename = ""
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
def get_season_and_episode(title):
|
||||
@@ -365,22 +342,15 @@ def get_season_and_episode(title):
@return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado
"""
filename = ""
# 4l3x87 - fix for series example 9-1-1
# original_title = title
# title = title.replace('9-1-1','')

patrons = ["(\d+)\s*[x-]\s*(\d+)", "(\d+)\s*×\s*(\d+)", "(?:s|t)(\d+)e(\d+)",
"(?:season|temp|stagione\w*)\s*(\d+)\s*(?:capitulo|epi|episode|episodio\w*)\s*(\d+)"]
patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)",
"(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"]

for patron in patrons:
try:
matches = re.compile(patron, re.I).search(title)

if matches:
if len(matches.group(1)) == 1:
filename = matches.group(1) + "x" + matches.group(2).zfill(2)
else:
filename = matches.group(1).lstrip('0') + "x" + matches.group(2).zfill(2)
filename = matches.group(1) + "x" + matches.group(2).zfill(2)
break
except:
pass
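The simplified pattern list still normalises episode titles to the "NxEE" form, zero-padding the episode number. An illustrative run of the first pattern, using the example title from the docstring of this function:

import re

m = re.compile(r"(\d+)x(\d+)", re.I).search("Name TvShow 1x6.avi")
print(m.group(1) + "x" + m.group(2).zfill(2))  # 1x06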
@@ -388,3 +358,27 @@ def get_season_and_episode(title):
|
||||
logger.info("'" + title + "' -> '" + filename + "'")
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
def get_sha1(cadena):
|
||||
try:
|
||||
import hashlib
|
||||
devuelve = hashlib.sha1(cadena).hexdigest()
|
||||
except:
|
||||
import sha
|
||||
import binascii
|
||||
devuelve = binascii.hexlify(sha.new(cadena).digest())
|
||||
|
||||
return devuelve
|
||||
|
||||
|
||||
def get_md5(cadena):
|
||||
try:
|
||||
import hashlib
|
||||
devuelve = hashlib.md5(cadena).hexdigest()
|
||||
except:
|
||||
import md5
|
||||
import binascii
|
||||
devuelve = binascii.hexlify(md5.new(cadena).digest())
|
||||
|
||||
return devuelve
|
||||
|
||||
@@ -1,346 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# --------------------------------------------------------------------------------
|
||||
# Scraper tools v2 for reading and processing web elements
|
||||
# --------------------------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
import urlparse
|
||||
|
||||
from core.entities import html5
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def printMatches(matches):
|
||||
i = 0
|
||||
for match in matches:
|
||||
logger.info("%d %s" % (i, match))
|
||||
i = i + 1
|
||||
|
||||
|
||||
def find_single_match(data, patron, index=0):
|
||||
try:
|
||||
matches = re.findall(patron, data, flags=re.DOTALL)
|
||||
return matches[index]
|
||||
except:
|
||||
return ""
|
||||
|
||||
|
||||
# Parse string and extracts multiple matches using regular expressions
|
||||
def find_multiple_matches(text, pattern):
|
||||
return re.findall(pattern, text, re.DOTALL)
|
||||
|
||||
|
||||
def find_multiple_matches_groups(text, pattern):
|
||||
r = re.compile(pattern)
|
||||
return [m.groupdict() for m in r.finditer(text)]
|
||||
|
||||
|
||||
# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8
|
||||
def decodeHtmlentities(data):
|
||||
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")
|
||||
|
||||
def substitute_entity(match):
|
||||
ent = match.group(2) + match.group(3)
|
||||
res = ""
|
||||
while not ent in html5 and not ent.endswith(";") and match.group(1) != "#":
|
||||
# Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos
|
||||
try:
|
||||
res = ent[-1] + res
|
||||
ent = ent[:-1]
|
||||
except:
|
||||
break
|
||||
|
||||
if match.group(1) == "#":
|
||||
ent = unichr(int(ent.replace(";", "")))
|
||||
return ent.encode('utf-8')
|
||||
else:
|
||||
cp = html5.get(ent)
|
||||
if cp:
|
||||
return cp.decode("unicode-escape").encode('utf-8') + res
|
||||
else:
|
||||
return match.group()
|
||||
|
||||
return entity_re.subn(substitute_entity, data)[0]
|
||||
|
||||
|
||||
def htmlclean(cadena):
|
||||
cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
|
||||
|
||||
cadena = cadena.replace("<center>", "")
|
||||
cadena = cadena.replace("</center>", "")
|
||||
cadena = cadena.replace("<cite>", "")
|
||||
cadena = cadena.replace("</cite>", "")
|
||||
cadena = cadena.replace("<em>", "")
|
||||
cadena = cadena.replace("</em>", "")
|
||||
cadena = cadena.replace("<u>", "")
|
||||
cadena = cadena.replace("</u>", "")
|
||||
cadena = cadena.replace("<li>", "")
|
||||
cadena = cadena.replace("</li>", "")
|
||||
cadena = cadena.replace("<turl>", "")
|
||||
cadena = cadena.replace("</tbody>", "")
|
||||
cadena = cadena.replace("<tr>", "")
|
||||
cadena = cadena.replace("</tr>", "")
|
||||
cadena = cadena.replace("<![CDATA[", "")
|
||||
cadena = cadena.replace("<wbr>", "")
|
||||
cadena = cadena.replace("<Br />", " ")
|
||||
cadena = cadena.replace("<BR />", " ")
|
||||
cadena = cadena.replace("<Br>", " ")
|
||||
cadena = re.compile("<br[^>]*>", re.DOTALL).sub(" ", cadena)
|
||||
|
||||
cadena = re.compile("<script.*?</script>", re.DOTALL).sub("", cadena)
|
||||
|
||||
cadena = re.compile("<option[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</option>", "")
|
||||
|
||||
cadena = re.compile("<button[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</button>", "")
|
||||
|
||||
cadena = re.compile("<i[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</iframe>", "")
|
||||
cadena = cadena.replace("</i>", "")
|
||||
|
||||
cadena = re.compile("<table[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</table>", "")
|
||||
|
||||
cadena = re.compile("<td[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</td>", "")
|
||||
|
||||
cadena = re.compile("<div[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</div>", "")
|
||||
|
||||
cadena = re.compile("<dd[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</dd>", "")
|
||||
|
||||
cadena = re.compile("<b[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</b>", "")
|
||||
|
||||
cadena = re.compile("<font[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</font>", "")
|
||||
|
||||
cadena = re.compile("<strong[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</strong>", "")
|
||||
|
||||
cadena = re.compile("<small[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</small>", "")
|
||||
|
||||
cadena = re.compile("<span[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</span>", "")
|
||||
|
||||
cadena = re.compile("<a[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</a>", "")
|
||||
|
||||
cadena = re.compile("<p[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</p>", "")
|
||||
|
||||
cadena = re.compile("<ul[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</ul>", "")
|
||||
|
||||
cadena = re.compile("<h1[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</h1>", "")
|
||||
|
||||
cadena = re.compile("<h2[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</h2>", "")
|
||||
|
||||
cadena = re.compile("<h3[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</h3>", "")
|
||||
|
||||
cadena = re.compile("<h4[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</h4>", "")
|
||||
|
||||
cadena = re.compile("<!--[^-]+-->", re.DOTALL).sub("", cadena)
|
||||
|
||||
cadena = re.compile("<img[^>]*>", re.DOTALL).sub("", cadena)
|
||||
|
||||
cadena = re.compile("<object[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</object>", "")
|
||||
cadena = re.compile("<param[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</param>", "")
|
||||
cadena = re.compile("<embed[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</embed>", "")
|
||||
|
||||
cadena = re.compile("<title[^>]*>", re.DOTALL).sub("", cadena)
|
||||
cadena = cadena.replace("</title>", "")
|
||||
|
||||
cadena = re.compile("<link[^>]*>", re.DOTALL).sub("", cadena)
|
||||
|
||||
cadena = cadena.replace("\t", "")
|
||||
# cadena = entityunescape(cadena)
|
||||
return cadena
|
||||
|
||||
|
||||
def slugify(title):
|
||||
# print title
|
||||
|
||||
# Sustituye acentos y eñes
|
||||
title = title.replace("Á", "a")
|
||||
title = title.replace("É", "e")
|
||||
title = title.replace("Í", "i")
|
||||
title = title.replace("Ó", "o")
|
||||
title = title.replace("Ú", "u")
|
||||
title = title.replace("á", "a")
|
||||
title = title.replace("é", "e")
|
||||
title = title.replace("í", "i")
|
||||
title = title.replace("ó", "o")
|
||||
title = title.replace("ú", "u")
|
||||
title = title.replace("À", "a")
|
||||
title = title.replace("È", "e")
|
||||
title = title.replace("Ì", "i")
|
||||
title = title.replace("Ò", "o")
|
||||
title = title.replace("Ù", "u")
|
||||
title = title.replace("à", "a")
|
||||
title = title.replace("è", "e")
|
||||
title = title.replace("ì", "i")
|
||||
title = title.replace("ò", "o")
|
||||
title = title.replace("ù", "u")
|
||||
title = title.replace("ç", "c")
|
||||
title = title.replace("Ç", "C")
|
||||
title = title.replace("Ñ", "n")
|
||||
title = title.replace("ñ", "n")
|
||||
title = title.replace("/", "-")
|
||||
title = title.replace("&", "&")
|
||||
|
||||
# Pasa a minúsculas
|
||||
title = title.lower().strip()
|
||||
|
||||
# Elimina caracteres no válidos
|
||||
validchars = "abcdefghijklmnopqrstuvwxyz1234567890- "
|
||||
title = ''.join(c for c in title if c in validchars)
|
||||
|
||||
# Sustituye espacios en blanco duplicados y saltos de línea
|
||||
title = re.compile("\s+", re.DOTALL).sub(" ", title)
|
||||
|
||||
# Sustituye espacios en blanco por guiones
|
||||
title = re.compile("\s", re.DOTALL).sub("-", title.strip())
|
||||
|
||||
# Sustituye espacios en blanco duplicados y saltos de línea
|
||||
title = re.compile("\-+", re.DOTALL).sub("-", title)
|
||||
|
||||
# Arregla casos especiales
|
||||
if title.startswith("-"):
|
||||
title = title[1:]
|
||||
|
||||
if title == "":
|
||||
title = "-" + str(time.time())
|
||||
|
||||
return title
|
||||
|
||||
|
||||
def remove_htmltags(string):
|
||||
return re.sub('<[^<]+?>', '', string)
|
||||
|
||||
|
||||
def remove_show_from_title(title, show):
|
||||
# print slugify(title)+" == "+slugify(show)
|
||||
# Quita el nombre del programa del título
|
||||
if slugify(title).startswith(slugify(show)):
|
||||
|
||||
# Convierte a unicode primero, o el encoding se pierde
|
||||
title = unicode(title, "utf-8", "replace")
|
||||
show = unicode(show, "utf-8", "replace")
|
||||
title = title[len(show):].strip()
|
||||
|
||||
if title.startswith("-"):
|
||||
title = title[1:].strip()
|
||||
|
||||
if title == "":
|
||||
title = str(time.time())
|
||||
|
||||
# Vuelve a utf-8
|
||||
title = title.encode("utf-8", "ignore")
|
||||
show = show.encode("utf-8", "ignore")
|
||||
|
||||
return title
|
||||
|
||||
|
||||
# scrapertools.get_filename_from_url(media_url)[-4:]
|
||||
def get_filename_from_url(url):
|
||||
parsed_url = urlparse.urlparse(url)
|
||||
try:
|
||||
filename = parsed_url.path
|
||||
except:
|
||||
# Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
|
||||
if len(parsed_url) >= 4:
|
||||
filename = parsed_url[2]
|
||||
else:
|
||||
filename = ""
|
||||
|
||||
if "/" in filename:
|
||||
filename = filename.split("/")[-1]
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
def get_domain_from_url(url):
|
||||
parsed_url = urlparse.urlparse(url)
|
||||
try:
|
||||
filename = parsed_url.netloc
|
||||
except:
|
||||
# Si falla es porque la implementación de parsed_url no reconoce los atributos como "path"
|
||||
if len(parsed_url) >= 4:
|
||||
filename = parsed_url[1]
|
||||
else:
|
||||
filename = ""
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
def get_season_and_episode(title):
|
||||
"""
|
||||
Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio
|
||||
Ejemplos de diferentes valores para title y su valor devuelto:
|
||||
"serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01'
|
||||
"Name TvShow 1x6.avi" -> '1x06'
|
||||
"Temp 3 episodio 2.avi" -> '3x02'
|
||||
"Alcantara season 13 episodie 12.avi" -> '13x12'
|
||||
"Temp1 capitulo 14" -> '1x14'
|
||||
"Temporada 1: El origen Episodio 9" -> '' (entre el numero de temporada y los episodios no puede haber otro texto)
|
||||
"Episodio 25: titulo episodio" -> '' (no existe el numero de temporada)
|
||||
"Serie X Temporada 1" -> '' (no existe el numero del episodio)
|
||||
@type title: str
|
||||
@param title: titulo del episodio de una serie
|
||||
@rtype: str
|
||||
@return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado
|
||||
"""
|
||||
filename = ""
|
||||
|
||||
patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)",
|
||||
"(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"]
|
||||
|
||||
for patron in patrons:
|
||||
try:
|
||||
matches = re.compile(patron, re.I).search(title)
|
||||
if matches:
|
||||
filename = matches.group(1) + "x" + matches.group(2).zfill(2)
|
||||
break
|
||||
except:
|
||||
pass
|
||||
|
||||
logger.info("'" + title + "' -> '" + filename + "'")
|
||||
|
||||
return filename
|
||||
|
||||
|
||||
def get_sha1(cadena):
|
||||
try:
|
||||
import hashlib
|
||||
devuelve = hashlib.sha1(cadena).hexdigest()
|
||||
except:
|
||||
import sha
|
||||
import binascii
|
||||
devuelve = binascii.hexlify(sha.new(cadena).digest())
|
||||
|
||||
return devuelve
|
||||
|
||||
|
||||
def get_md5(cadena):
|
||||
try:
|
||||
import hashlib
|
||||
devuelve = hashlib.md5(cadena).hexdigest()
|
||||
except:
|
||||
import md5
|
||||
import binascii
|
||||
devuelve = binascii.hexlify(md5.new(cadena).digest())
|
||||
|
||||
return devuelve
|
||||
@@ -506,8 +506,8 @@ def get_server_json(server_name):
|
||||
|
||||
|
||||
def get_server_host(server_name):
|
||||
from core import scrapertoolsV2
|
||||
return [scrapertoolsV2.get_domain_from_url(pattern['url']) for pattern in get_server_json(server_name)['find_videos']['patterns']]
|
||||
from core import scrapertools
|
||||
return [scrapertools.get_domain_from_url(pattern['url']) for pattern in get_server_json(server_name)['find_videos']['patterns']]
|
||||
|
||||
|
||||
def get_server_controls_settings(server_name):
|
||||
|
||||
@@ -10,7 +10,7 @@ import urlparse
|
||||
import xbmcaddon
|
||||
|
||||
from channelselector import thumb
|
||||
from core import httptools, scrapertoolsV2, servertools, tmdb, channeltools
|
||||
from core import httptools, scrapertools, servertools, tmdb, channeltools
|
||||
from core.item import Item
|
||||
from lib import unshortenit
|
||||
from platformcode import logger, config
|
||||
@@ -21,7 +21,7 @@ def hdpass_get_servers(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data.replace('\n', '')
|
||||
patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
|
||||
url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "")
|
||||
url = scrapertools.find_single_match(data, patron).replace("?alta", "")
|
||||
url = url.replace("&download=1", "")
|
||||
if 'https' not in url:
|
||||
url = 'https:' + url
|
||||
@@ -37,20 +37,20 @@ def hdpass_get_servers(item):
|
||||
patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
|
||||
patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed"\s*value="([^"]+)"\s*/>'
|
||||
|
||||
res = scrapertoolsV2.find_single_match(data, patron_res)
|
||||
res = scrapertools.find_single_match(data, patron_res)
|
||||
|
||||
itemlist = []
|
||||
|
||||
for res_url, res_video in scrapertoolsV2.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):
|
||||
for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):
|
||||
|
||||
data = httptools.downloadpage(urlparse.urljoin(url, res_url)).data.replace('\n', '')
|
||||
|
||||
mir = scrapertoolsV2.find_single_match(data, patron_mir)
|
||||
mir = scrapertools.find_single_match(data, patron_mir)
|
||||
|
||||
for mir_url, srv in scrapertoolsV2.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):
|
||||
for mir_url, srv in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):
|
||||
|
||||
data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data.replace('\n', '')
|
||||
for media_label, media_url in scrapertoolsV2.find_multiple_matches(data, patron_media):
|
||||
for media_label, media_url in scrapertools.find_multiple_matches(data, patron_media):
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="play",
|
||||
fulltitle=item.fulltitle,
|
||||
@@ -168,13 +168,13 @@ def scrapeLang(scraped, lang, longtitle):
|
||||
return language, longtitle
|
||||
|
||||
def cleantitle(title):
|
||||
cleantitle = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(title).replace('"', "'").replace('×', 'x').replace('–', '-')).strip()
|
||||
cleantitle = scrapertools.htmlclean(scrapertools.decodeHtmlentities(title).replace('"', "'").replace('×', 'x').replace('–', '-')).strip()
|
||||
return cleantitle
|
||||
|
||||
def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, typeContentDict, typeActionDict, blacklist, search, pag, function, lang):
|
||||
itemlist = []
|
||||
log("scrapeBlock qui", block, patron)
|
||||
matches = scrapertoolsV2.find_multiple_matches_groups(block, patron)
|
||||
log("scrapeBlock qui")
|
||||
matches = scrapertools.find_multiple_matches_groups(block, patron)
|
||||
log('MATCHES =', matches)
|
||||
|
||||
if debug:
|
||||
@@ -214,7 +214,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
|
||||
for kk in known_keys:
|
||||
val = match[listGroups.index(kk)] if kk in listGroups else ''
|
||||
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
|
||||
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
|
||||
val = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
|
||||
scraped[kk] = val
|
||||
|
||||
if scraped['season']:
|
||||
@@ -227,7 +227,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
|
||||
episode = ''
|
||||
else:
|
||||
episode = re.sub(r'\s-\s|-|x|–|×|×', 'x', scraped['episode']) if scraped['episode'] else ''
|
||||
second_episode = scrapertoolsV2.find_single_match(episode,'x\d+x(\d+)')
|
||||
second_episode = scrapertools.find_single_match(episode, 'x\d+x(\d+)')
|
||||
if second_episode: episode = re.sub(r'(\d+x\d+)x\d+',r'\1-', episode) + second_episode.zfill(2)
|
||||
|
||||
#episode = re.sub(r'\s-\s|-|x|–|×', 'x', scraped['episode']) if scraped['episode'] else ''
|
||||
@@ -257,18 +257,18 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
|
||||
if scraped["plot"]:
|
||||
infolabels['plot'] = plot
|
||||
if scraped['duration']:
|
||||
matches = scrapertoolsV2.find_multiple_matches(scraped['duration'],
|
||||
matches = scrapertools.find_multiple_matches(scraped['duration'],
|
||||
r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
|
||||
for h, m in matches:
|
||||
scraped['duration'] = int(h) * 60 + int(m)
|
||||
if not matches:
|
||||
scraped['duration'] = scrapertoolsV2.find_single_match(scraped['duration'], r'(\d+)')
|
||||
scraped['duration'] = scrapertools.find_single_match(scraped['duration'], r'(\d+)')
|
||||
infolabels['duration'] = int(scraped['duration']) * 60
|
||||
if scraped['genere']:
|
||||
genres = scrapertoolsV2.find_multiple_matches(scraped['genere'], '[A-Za-z]+')
|
||||
genres = scrapertools.find_multiple_matches(scraped['genere'], '[A-Za-z]+')
|
||||
infolabels['genere'] = ", ".join(genres)
|
||||
if scraped["rating"]:
|
||||
infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
|
||||
infolabels['rating'] = scrapertools.decodeHtmlentities(scraped["rating"])
|
||||
|
||||
AC = CT = ''
|
||||
if typeContentDict:
|
||||
@@ -377,7 +377,18 @@ def scrape(func):
|
||||
|
||||
log('PATRON= ', patron)
|
||||
if not data:
|
||||
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session).data.replace("'", '"')
|
||||
page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True, session=item.session)
|
||||
# if url may be changed and channel has findhost to update
|
||||
if (not page.data or scrapertools.get_domain_from_url(page.url) != scrapertools.get_domain_from_url(item.url)) and 'findhost' in func.__globals__:
|
||||
host = func.__globals__['findhost']()
|
||||
parse = list(urlparse.urlparse(item.url))
|
||||
from core import jsontools
|
||||
jsontools.update_node(host, func.__module__.split('.')[-1], 'url')
|
||||
parse[1] = scrapertools.get_domain_from_url(host)
|
||||
item.url = urlparse.urlunparse(parse)
|
||||
page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True,
|
||||
session=item.session)
|
||||
data = page.data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
# replace all ' with " and eliminate newline, so we don't need to worry about
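The new findhost fallback in scrape() replaces only the domain of the stored URL with the freshly discovered host before retrying the download. The urlparse round trip it relies on looks roughly like this (both URLs below are hypothetical):

import urlparse  # Python 2 module, as used throughout the addon

old_url = 'https://www.example-channel.org/serie-tv/'   # stale URL saved for the channel
new_host = 'https://www.example-channel.casa'           # host returned by a channel's findhost()

parts = list(urlparse.urlparse(old_url))
parts[1] = urlparse.urlparse(new_host).netloc           # swap only the domain, keep the path
print(urlparse.urlunparse(parts))                       # https://www.example-channel.casa/serie-tv/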
@@ -385,7 +396,7 @@ def scrape(func):
|
||||
if patronBlock:
|
||||
if debugBlock:
|
||||
regexDbg(item, patronBlock, headers, data)
|
||||
blocks = scrapertoolsV2.find_multiple_matches_groups(data, patronBlock)
|
||||
blocks = scrapertools.find_multiple_matches_groups(data, patronBlock)
|
||||
block = ""
|
||||
for bl in blocks:
|
||||
# log(len(blocks),bl)
|
||||
@@ -434,7 +445,7 @@ def scrape(func):
|
||||
if anime:
|
||||
if function == 'episodios' or item.action == 'episodios': autorenumber.renumber(itemlist, item, 'bold')
|
||||
else: autorenumber.renumber(itemlist)
|
||||
if anime and autorenumber.check(item) == False and not scrapertoolsV2.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
|
||||
if anime and autorenumber.check(item) == False and not scrapertools.find_single_match(itemlist[0].title, r'(\d+.\d+)'):
|
||||
pass
|
||||
else:
|
||||
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
|
||||
@@ -462,7 +473,7 @@ def dooplay_get_links(item, host):
|
||||
|
||||
data = httptools.downloadpage(item.url).data.replace("'", '"')
|
||||
patron = r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?'
|
||||
matches = scrapertoolsV2.find_multiple_matches(data, patron)
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
ret = []
|
||||
|
||||
@@ -474,7 +485,7 @@ def dooplay_get_links(item, host):
|
||||
"type": type
|
||||
})
|
||||
dataAdmin = httptools.downloadpage(host + '/wp-admin/admin-ajax.php', post=postData,headers={'Referer': item.url}).data
|
||||
link = scrapertoolsV2.find_single_match(dataAdmin, "<iframe.*src='([^']+)'")
|
||||
link = scrapertools.find_single_match(dataAdmin, "<iframe.*src='([^']+)'")
|
||||
ret.append({
|
||||
'url': link,
|
||||
'title': title,
|
||||
@@ -551,25 +562,25 @@ def swzz_get_url(item):
|
||||
if "/link/" in item.url:
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
if "link =" in data:
|
||||
data = scrapertoolsV2.find_single_match(data, 'link = "([^"]+)"')
|
||||
data = scrapertools.find_single_match(data, 'link = "([^"]+)"')
|
||||
if 'http' not in data:
|
||||
data = 'https:' + data
|
||||
else:
|
||||
match = scrapertoolsV2.find_single_match(data, r'<meta name="og:url" content="([^"]+)"')
|
||||
match = scrapertoolsV2.find_single_match(data, r'URL=([^"]+)">') if not match else match
|
||||
match = scrapertools.find_single_match(data, r'<meta name="og:url" content="([^"]+)"')
|
||||
match = scrapertools.find_single_match(data, r'URL=([^"]+)">') if not match else match
|
||||
|
||||
if not match:
|
||||
from lib import jsunpack
|
||||
|
||||
try:
|
||||
data = scrapertoolsV2.find_single_match(data.replace('\n', ''), r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
|
||||
data = scrapertools.find_single_match(data.replace('\n', ''), r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
|
||||
data = jsunpack.unpack(data)
|
||||
|
||||
logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
|
||||
except:
|
||||
logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
|
||||
|
||||
data = scrapertoolsV2.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
|
||||
data = scrapertools.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
|
||||
data, c = unshortenit.unwrap_30x_only(data)
|
||||
else:
|
||||
data = match
|
||||
@@ -626,8 +637,8 @@ def menu(func):
|
||||
|
||||
item = args['item']
|
||||
host = func.__globals__['host']
|
||||
list_servers = func.__globals__['list_servers']
|
||||
list_quality = func.__globals__['list_quality']
|
||||
list_servers = func.__globals__['list_servers'] if 'list_servers' in func.__globals__ else 'directo'
|
||||
list_quality = func.__globals__['list_quality'] if 'list_quality' in func.__globals__ else 'default'
|
||||
filename = func.__module__.split('.')[1]
|
||||
global_search = False
|
||||
# listUrls = ['film', 'filmSub', 'tvshow', 'tvshowSub', 'anime', 'animeSub', 'search', 'top', 'topSub']
|
||||
@@ -744,7 +755,7 @@ def typo(string, typography=''):
|
||||
if 'submenu' in string:
|
||||
string = u"\u2022\u2022 ".encode('utf-8') + re.sub(r'\ssubmenu','',string)
|
||||
if 'color' in string:
|
||||
color = scrapertoolsV2.find_single_match(string,'color ([a-z]+)')
|
||||
color = scrapertools.find_single_match(string, 'color ([a-z]+)')
|
||||
if color == 'kod' or '': color = kod_color
|
||||
string = '[COLOR '+ color +']' + re.sub(r'\scolor\s([a-z]+)','',string) + '[/COLOR]'
|
||||
if 'bold' in string:
|
||||
@@ -776,13 +787,13 @@ def match(item, patron='', patronBlock='', headers='', url='', post=''):
|
||||
log('DATA= ', data)
|
||||
|
||||
if patronBlock:
|
||||
block = scrapertoolsV2.find_single_match(data, patronBlock)
|
||||
block = scrapertools.find_single_match(data, patronBlock)
|
||||
log('BLOCK= ',block)
|
||||
else:
|
||||
block = data
|
||||
|
||||
if patron:
|
||||
matches = scrapertoolsV2.find_multiple_matches(block, patron)
|
||||
matches = scrapertools.find_multiple_matches(block, patron)
|
||||
log('MATCHES= ',matches)
|
||||
|
||||
return matches, block
|
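For reference, a hedged usage sketch of the match() helper patched above: it narrows the downloaded page to patronBlock first, then collects patron matches inside that block, and returns both. The module and Item import paths and the regexes are assumptions for illustration; only the call shape and the (matches, block) return value come from the hunk.

# Hedged usage sketch, assuming match() downloads item.url when no explicit url is passed.
from core import support          # module containing match(), path assumed
from core.item import Item        # path assumed

item = Item(url='https://example.com/film')    # hypothetical listing page
matches, block = support.match(item,
                               patronBlock=r'<ul class="movies">(.*?)</ul>',
                               patron=r'<a href="([^"]+)"[^>]*>([^<]+)</a>')
for url, title in matches:
    print(title, url)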
||||
@@ -890,12 +901,12 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
|
||||
# If the call is direct, leave it blank
|
||||
action = inspect.stack()[function_or_level][3] if type(function_or_level) == int else function_or_level
|
||||
if next_page == '':
|
||||
next_page = scrapertoolsV2.find_single_match(data, patron)
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
|
||||
if next_page != "":
|
||||
if resub: next_page = re.sub(resub[0], resub[1], next_page)
|
||||
if 'http' not in next_page:
|
||||
next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
|
||||
next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
|
||||
next_page = re.sub('&', '&',next_page)
|
||||
log('NEXT= ', next_page)
|
||||
itemlist.append(
|
||||
@@ -970,6 +981,7 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True):
|
||||
channel_node = autoplay_node.get(item.channel, {})
|
||||
settings_node = channel_node.get('settings', {})
|
||||
AP = get_setting('autoplay') or settings_node['active']
|
||||
APS = get_setting('autoplay_server_list')
|
||||
|
||||
if CL and not AP:
|
||||
if get_setting('checklinks', item.channel):
|
||||
@@ -982,15 +994,27 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True, down_load=True):
|
||||
checklinks_number = get_setting('checklinks_number')
|
||||
itemlist = servertools.check_list_links(itemlist, checklinks_number)
|
||||
|
||||
if AutoPlay == True and inspect.stack()[4][3] != 'start_download':
|
||||
if AutoPlay == True and not 'downloads' in inspect.stack()[3][1] + inspect.stack()[4][1]:
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
if item.contentChannel != 'videolibrary': videolibrary(itemlist, item, function_level=3)
|
||||
if get_setting('downloadenabled') and down_load == True: download(itemlist, item, function_level=3)
|
||||
return itemlist
|
||||
|
||||
VL = False
|
||||
try:
|
||||
if 'downloads' in inspect.stack()[3][1] + inspect.stack()[4][1] or \
|
||||
inspect.stack()[4][3] == 'play_from_library' or \
|
||||
inspect.stack()[5][3] == 'play_from_library' or \
|
||||
'videolibrary' in inspect.stack()[3][1] or \
|
||||
'videolibrary' in inspect.stack()[4][1]:
|
||||
VL = True
|
||||
except:
|
||||
pass
|
||||
if not AP or VL or not APS:
|
||||
return itemlist
|
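The new guard above decides whether autoplay may run by inspecting the call stack: in each inspect.stack() entry, index 1 is the caller's source file and index 3 its function name, so finding 'downloads' or 'videolibrary' there means the request did not come from a normal listing. A tiny hedged illustration of that pattern; the helper name and depths are illustrative only.

# Hedged sketch of the inspect.stack() check used in controls() above.
import inspect

def called_from(fragment, depths=(3, 4)):
    """Return True if the caller's filename at any of the given stack depths contains fragment."""
    frames = inspect.stack()
    return any(fragment in frames[d][1] for d in depths if d < len(frames))

# e.g. called_from('downloads') mirrors the "'downloads' in inspect.stack()[3][1] + inspect.stack()[4][1]" test.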
||||
|
||||
def filterLang(item, itemlist):
|
||||
import channeltools
|
||||
# import channeltools
|
||||
list_language = channeltools.get_lang(item.channel)
|
||||
if len(list_language) > 1:
|
||||
from specials import filtertools
|
||||
|
||||
@@ -5,17 +5,19 @@
|
||||
|
||||
import os
|
||||
import sys
|
||||
from threading import Thread
|
||||
|
||||
import xbmc
|
||||
from platformcode import config, logger
|
||||
import ssl
|
||||
logger.info(ssl.OPENSSL_VERSION)
|
||||
|
||||
logger.info("init...")
|
||||
|
||||
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
|
||||
sys.path.insert(0, librerias)
|
||||
|
||||
if not config.dev_mode():
|
||||
from platformcode import updater
|
||||
Thread(target=updater.timer())
|
||||
|
||||
from platformcode import launcher
|
||||
|
||||
|
||||
77
lib/doh.py
Normal file

@@ -0,0 +1,77 @@
|
||||
# https://github.com/stamparm/python-doh
|
||||
from __future__ import print_function
|
||||
|
||||
import json
|
||||
import re
|
||||
import socket
|
||||
import ssl
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
PY3 = sys.version_info >= (3, 0)
|
||||
|
||||
if hasattr(ssl, "_create_unverified_context"):
|
||||
ssl._create_default_https_context = ssl._create_unverified_context
|
||||
DOH_SERVER = "1.1.1.1" # Note: to prevent potential blocking of service based on DNS name
|
||||
else:
|
||||
DOH_SERVER = "cloudflare-dns.com" # Alternative servers: doh.securedns.eu, doh-de.blahdns.com, doh-jp.blahdns.com
|
||||
|
||||
if PY3:
|
||||
import urllib.request
|
||||
_urlopen = urllib.request.urlopen
|
||||
_Request = urllib.request.Request
|
||||
else:
|
||||
import urllib2
|
||||
_urlopen = urllib2.urlopen
|
||||
_Request = urllib2.Request
|
||||
|
||||
def query(name, type='A', server=DOH_SERVER, path="/dns-query", fallback=True, verbose=False):
|
||||
"""
|
||||
Returns domain name query results retrieved by using DNS over HTTPS protocol
|
||||
# Reference: https://developers.cloudflare.com/1.1.1.1/dns-over-https/json-format/
|
||||
>>> query("one.one.one.one", fallback=False)
|
||||
['1.0.0.1', '1.1.1.1']
|
||||
>>> query("one", "NS")
|
||||
['a.nic.one.', 'b.nic.one.', 'c.nic.one.', 'd.nic.one.']
|
||||
"""
|
||||
|
||||
retval = None
|
||||
|
||||
try:
|
||||
req = _Request("https://%s%s?name=%s&type=%s" % (server, path, name, type), headers={"Accept": "application/dns-json"})
|
||||
content = _urlopen(req).read().decode()
|
||||
reply = json.loads(content)
|
||||
|
||||
if "Answer" in reply:
|
||||
answer = json.loads(content)["Answer"]
|
||||
retval = [_["data"] for _ in answer]
|
||||
else:
|
||||
retval = []
|
||||
except Exception as ex:
|
||||
if verbose:
|
||||
print("Exception occurred: '%s'" % ex)
|
||||
|
||||
if retval is None and fallback:
|
||||
if type == 'A':
|
||||
try:
|
||||
retval = socket.gethostbyname_ex(name)[2]
|
||||
except (socket.error, IndexError):
|
||||
pass
|
||||
|
||||
if retval is None:
|
||||
process = subprocess.Popen(("nslookup", "-q=%s" % type, name), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
content = (process.communicate()[0] or "").decode().replace("\r", "")
|
||||
|
||||
if "\n\n" in content and "can't" not in content.lower():
|
||||
answer = content.split("\n\n", 1)[-1]
|
||||
retval = re.findall(r"(?m)^%s.+= ([^=,\n]+)$" % re.escape(name), answer) or re.findall(r"Address: (.+)", answer)
|
||||
|
||||
if not retval:
|
||||
match = re.search(r"Addresses: ([\s\d.]+)", answer)
|
||||
if match:
|
||||
retval = re.split(r"\s+", match.group(1).strip())
|
||||
|
||||
if not PY3 and retval:
|
||||
retval = [_.encode() for _ in retval]
|
||||
|
||||
return retval
|
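The new DNS-override method presumably resolves blocked hostnames through this module instead of the system resolver. A minimal hedged usage sketch; the domain is only an example, and how the resolved address is then used (for instance pinning it while keeping the original Host header) is an assumption, not something this file defines.

# Hedged usage sketch for lib/doh.py above.
from lib import doh

ips = doh.query('www.cinemalibero.live')     # A records via DNS over HTTPS (Cloudflare JSON API)
if ips:
    print('resolved to', ips[0])
else:
    print('no answer, even after the gethostbyname/nslookup fallbacks')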
||||
315
lib/gktools.py
@@ -1,315 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
'''
gktools is a set of helper functions for resolving links to videos behind "GK protection".
"GK protection" is probably not an official name; it is used here because the first examples seen were gkpluginsphp and gkpedia.

"GK" characteristics:
- Uses a __cfduid cookie
- Computes a cryptographic token from a piece of text and a key
- The text is taken from the html (for example from meta name="google-site-verification", but it can be more complex)
- The encryption key is computed in obfuscated js loaded by the html
- Another url is called with a series of parameters, such as the token, and the final videos are obtained from there.

Howto:
1- download the page
2- extract the data and compute what is needed
3- download the second page with the computed token
4- extract the videos

Step 2 is by far the most variable and depends heavily on each site/server!
De-obfuscating the js reveals the specific data each one needs
(the text to encrypt, the key to use, the url to call and the parameters).

See examples in the code of the animeyt and pelispedia channels.


Created for Alfa-addon by Alfa Developers Team 2018
'''
|
||||
|
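# A hedged sketch of the four "Howto" steps above, composed from this module's own helpers.
# The token URL, the post parameters and the gsv/pwd values are placeholders: every site wires
# these differently, which is exactly the variability warned about for step 2.
#
#   data, ck = get_data_and_cookie(item)                          # 1- download the page
#   gsv = scrapertools.find_single_match(
#       data, '<meta name="google-site-verification" content="([^"]*)"')
#   token = generar_token(gsv, 'key-recovered-from-the-js')       # 2- compute the token
#   data2 = get_data_json('https://example.com/api', 'token=' + token,
#                         ck_value=ck, referer=item.url)          # 3- second request with the token
#   videos = extraer_enlaces_json(data2, item.url)                # 4- extract the final videos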
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
import os, base64, json, hashlib, urlparse
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from platformcode import logger
|
||||
from aadecode import decode as aadecode
|
||||
|
||||
# Descarga página y captura la petición de cookie
|
||||
# -----------------------------------------------
|
||||
def get_data_and_cookie(item, ck_name='__cfduid'):
|
||||
|
||||
headers = {'Referer': item.referer}
|
||||
resp = httptools.downloadpage(item.url, headers=headers, cookies=False)
|
||||
# ~ with open('gk_play1.html', 'w') as f: f.write(resp.data); f.close()
|
||||
|
||||
ck_value = ''
|
||||
if ck_name != '':
|
||||
for h in resp.headers:
|
||||
ck = scrapertools.find_single_match(resp.headers[h], '%s=([^;]*)' % ck_name)
|
||||
if ck:
|
||||
ck_value = ck
|
||||
break
|
||||
|
||||
return resp.data, ck_value
|
||||
|
||||
|
||||
# Descarga página usando una cookie concreta
|
||||
# ------------------------------------------
|
||||
def get_data_with_cookie(url, ck_value='', referer='', ck_name='__cfduid'):
|
||||
|
||||
headers = {'Cookie': ck_name+'='+ck_value}
|
||||
if referer != '': headers['referer'] = referer
|
||||
data = httptools.downloadpage(url, headers=headers, cookies=False).data
|
||||
# ~ with open('gk_play2.html', 'w') as f: f.write(data); f.close()
|
||||
|
||||
return data
|
||||
|
||||
|
||||
# Descarga json usando una cookie concreta
|
||||
# ----------------------------------------
|
||||
def get_data_json(url, post, ck_value='', referer='', ck_name='__cfduid'):
|
||||
|
||||
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': ck_name+'='+ck_value}
|
||||
if referer != '': headers['referer'] = referer
|
||||
|
||||
data = httptools.downloadpage(url, post=post, headers=headers, cookies=False).data
|
||||
# ~ with open('gk_play3.html', 'w') as f: f.write(data); f.close()
|
||||
|
||||
return data
|
||||
|
||||
|
||||
# Obtiene link de una llamada javascript Play() o de la url
|
||||
# ---------------------------------------------------------
|
||||
def get_play_link_id(data, url):
|
||||
|
||||
playparms = scrapertools.find_single_match(data, 'Play\("([^"]*)","([^"]*)","([^"]*)"')
|
||||
if playparms:
|
||||
link = playparms[0]
|
||||
subtitle = '' if playparms[1] == '' or playparms[2] == '' else playparms[2] + playparms[1] + '.srt'
|
||||
else:
|
||||
subtitle = ''
|
||||
link = scrapertools.find_single_match(data, 'Play\("([^"]*)"')
|
||||
if not link:
|
||||
link = scrapertools.find_single_match(url, 'id=([^;]*)')
|
||||
|
||||
return link, subtitle
|
||||
|
||||
|
||||
# Extraer enlaces a videos de datos json
|
||||
# --------------------------------------
|
||||
def extraer_enlaces_json(data, referer, subtitle=''):
|
||||
itemlist = []
|
||||
|
||||
# Ejemplos:
|
||||
# {"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/my-project-yt-195318.appspot.com\/slow.mp4","type":"mp4","label":"1080p"}]}
|
||||
# {"link":[{"link":"http:\/\/video8.narusaku.tv\/static\/720p\/2.1208982.2039540?md5=B64FKYNbFuWvxkGcSbtz2Q&expires=1528839657","label":"720p","type":"mp4"},{"link":"http:\/\/video5.narusaku.tv\/static\/480p\/2.1208982.2039540?md5=yhLG_3VghEUSd5YlCXOTBQ&expires=1528839657","label":"480p","type":"mp4","default":true},{"link":"http:\/\/video3.narusaku.tv\/static\/360p\/2.1208982.2039540?md5=vC0ZJkxRwV1rVBdeF7D4iA&expires=1528839657","label":"360p","type":"mp4"},{"link":"http:\/\/video2.narusaku.tv\/static\/240p\/2.1208982.2039540?md5=b-y_-rgrLMW7hJwFQSD8Tw&expires=1528839657","label":"240p","type":"mp4"}]}
|
||||
# {"link":"https:\/\/storage.googleapis.com\/cloudflare-caching-pelispedia.appspot.com\/cache\/16050.mp4","type":"mp4"}
|
||||
# {"Harbinger":[{"Harbinger":"...","type":"...","label":"..."}], ...}
|
||||
|
||||
data = data.replace('"Harbinger"', '"file"')
|
||||
|
||||
# Intentar como json
|
||||
# ------------------
|
||||
try:
|
||||
json_data = json.loads(data)
|
||||
enlaces = analizar_enlaces_json(json_data)
|
||||
for enlace in enlaces:
|
||||
url = enlace['link'] if 'link' in enlace else enlace['file']
|
||||
if not url.startswith('http'): url = aadecode(base64.b64decode(url)) # necesario para "Harbinger"
|
||||
if not url.startswith('http'): url = decode_rijndael(url) # post-"Harbinger" en algunos casos
|
||||
tit = ''
|
||||
if 'type' in enlace: tit += '[%s]' % enlace['type']
|
||||
if 'label' in enlace: tit += '[%s]' % enlace['label']
|
||||
if tit == '': tit = '.mp4'
|
||||
|
||||
itemlist.append([tit, corregir_url(url, referer), 0, subtitle])
|
||||
|
||||
# Sino, intentar como texto
|
||||
# -------------------------
|
||||
except:
|
||||
matches = scrapertools.find_multiple_matches(data, '"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"')
|
||||
if matches:
|
||||
for url, lbl, typ in matches:
|
||||
itemlist.append(['[%s][%s]' % (typ, lbl), corregir_url(url, referer), 0, subtitle])
|
||||
else:
|
||||
url = scrapertools.find_single_match(data, '"link"\s*:\s*"([^"]*)"')
|
||||
if url:
|
||||
itemlist.append(['.mp4', corregir_url(url, referer), 0, subtitle])
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# Función recursiva que busca videos en un diccionario
|
||||
# ----------------------------------------------------
|
||||
def analizar_enlaces_json(d):
|
||||
itemlist = []
|
||||
found = {}
|
||||
for k, v in d.iteritems():
|
||||
if k in ['file','link','type','label'] and not isinstance(v, list):
|
||||
found[k] = v
|
||||
|
||||
if isinstance(v, list):
|
||||
for l in v:
|
||||
if isinstance(l, dict): itemlist += analizar_enlaces_json(l)
|
||||
|
||||
if 'file' in found or 'link' in found:
|
||||
itemlist.append(found)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# Correcciones en las urls finales obtenidas
|
||||
# ------------------------------------------
|
||||
def corregir_url(url, referer):
|
||||
url = url.replace('\/', '/')
|
||||
if 'chomikuj.pl/' in url: url += "|Referer=%s" % referer
|
||||
return url
|
||||
|
||||
|
||||
|
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
||||
# Conversion tipo hexa que hay en el js
|
||||
# -------------------------------------
|
||||
def toHex(txt):
|
||||
ret = ''
|
||||
for i in range(len(txt)):
|
||||
ret += str(hex(ord(txt[i]))).replace('x','')[-2:]
|
||||
return ret
|
||||
|
||||
|
||||
# Subrutinas de encriptación
|
||||
# --------------------------
|
||||
|
||||
def md5_dominio(url): # sutorimux/kubechi
|
||||
h = hashlib.md5(urlparse.urlparse(url).netloc)
|
||||
return h.hexdigest()
|
||||
|
||||
|
||||
def transforma_gsv(gsv, valor):
|
||||
llista = range(256)
|
||||
a = 0
|
||||
for i in range(256):
|
||||
a = (a + llista[i] + ord(gsv[i % len(gsv)]) ) % 256
|
||||
b = llista[i]
|
||||
llista[i] = llista[a]
|
||||
llista[a] = b
|
||||
|
||||
ret = ''
|
||||
a = 0; b= 0
|
||||
for i in range(len(valor)):
|
||||
a = (a + 1) % 256
|
||||
b = (b + llista[a]) % 256
|
||||
c = llista[a]
|
||||
llista[a] = llista[b]
|
||||
llista[b] = c
|
||||
ret += chr(ord(valor[i]) ^ llista[(llista[a] + llista[b]) % 256])
|
||||
|
||||
return base64.b64encode(ret)
|
||||
|
||||
|
||||
|
||||
# Codificar/Decodificar con Rijndael
|
||||
# ----------------------------------
|
||||
|
||||
def encode_rijndael(msg, IV, key):
|
||||
import rijndael
|
||||
return rijndael.cbc_encrypt(msg, IV, key)
|
||||
|
||||
|
||||
def decode_rijndael(txt, preIV='b3512f4972d314da9', key='3e1a854e7d5835ab99d99a29afec8bbb'):
|
||||
import rijndael
|
||||
msg = base64.b64decode(txt[:-15])
|
||||
IV = preIV + txt[-15:]
|
||||
deco = rijndael.cbc_decrypt(msg, IV, key)
|
||||
return deco.replace(chr(0), '')
|
||||
|
||||
|
||||
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
||||
# Generar un token válido a partir de un texto y una clave
|
||||
# --------------------------------------------------------
|
||||
|
||||
# gsv: google-site-verification, obtenido de '<meta name="google-site-verification" content="([^"]*)"'
|
||||
# pwd: Password
|
||||
def generar_token(gsv, pwd):
|
||||
txt = obtener_cripto(pwd, gsv)
|
||||
# ~ logger.info('Texto pre token %s' % txt)
|
||||
|
||||
_0x382d28 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
|
||||
|
||||
valors = [0, 0, 0]
|
||||
cicle = 0
|
||||
retorn = ''
|
||||
for ch in txt:
|
||||
valors[cicle] = ord(ch)
|
||||
cicle += 1
|
||||
if cicle == 3:
|
||||
primer = _0x382d28[valors[0] >> 0x2]
|
||||
segon = _0x382d28[((valors[0] & 0x3) << 0x4) | (valors[1] >> 0x4)]
|
||||
tercer = _0x382d28[((valors[1] & 0xf) << 0x2) | (valors[2] >> 0x6)]
|
||||
quart = _0x382d28[valors[2] & 0x3f]
|
||||
retorn += primer + segon + tercer + quart
|
||||
|
||||
valors = [0, 0, 0]
|
||||
cicle = 0
|
||||
|
||||
return retorn
|
||||
|
||||
|
||||
def obtener_cripto(password, plaintext):
|
||||
salt = os.urandom(8)
|
||||
|
||||
paddingLength = len(plaintext) % 16
|
||||
if paddingLength == 0:
|
||||
paddedPlaintext = plaintext
|
||||
else:
|
||||
dif = 16 - paddingLength
|
||||
paddedPlaintext = plaintext + chr(dif)*dif
|
||||
|
||||
kdf = evpKDF(password, salt)
|
||||
iv = kdf['iv']
|
||||
|
||||
try: # Intentar con librería AES del sistema
|
||||
from Crypto.Cipher import AES
|
||||
cipherSpec = AES.new(kdf['key'], AES.MODE_CBC, iv)
|
||||
except: # Si falla intentar con librería del addon
|
||||
import jscrypto
|
||||
cipherSpec = jscrypto.new(kdf['key'], jscrypto.MODE_CBC, iv)
|
||||
ciphertext = cipherSpec.encrypt(paddedPlaintext)
|
||||
|
||||
return json.dumps({'ct': base64.b64encode(ciphertext), 'iv': iv.encode("hex"), 's': salt.encode("hex")}, sort_keys=True, separators=(',', ':'))
|
||||
|
||||
|
||||
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
|
||||
target_key_size = key_size + iv_size
|
||||
derived_bytes = ""
|
||||
number_of_derived_words = 0
|
||||
block = None
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
while number_of_derived_words < target_key_size:
|
||||
if block is not None:
|
||||
hasher.update(block)
|
||||
|
||||
hasher.update(passwd)
|
||||
hasher.update(salt)
|
||||
block = hasher.digest()
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
|
||||
for i in range(1, iterations):
|
||||
hasher.update(block)
|
||||
block = hasher.digest()
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
|
||||
derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
|
||||
|
||||
number_of_derived_words += len(block)/4
|
||||
|
||||
return {
|
||||
"key": derived_bytes[0: key_size * 4],
|
||||
"iv": derived_bytes[key_size * 4:]
|
||||
}
|
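For context, obtener_cripto() and evpKDF() above reproduce the CryptoJS default of deriving the AES key and IV with OpenSSL's EVP_BytesToKey (MD5, one iteration) from a password and a random salt. A hedged note on the output shape; the literal values are illustrative only.

#   token_json = obtener_cripto('secret-key', 'google-site-verification-value')
#   # -> '{"ct":"<base64 ciphertext>","iv":"<32 hex chars>","s":"<16 hex chars>"}'
#   # generar_token() then applies its own base64 pass over this JSON string.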
||||
312
lib/jjdecode.py
@@ -1,312 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Python version of the jjdecode function written by Syed Zainudeen
|
||||
# http://csc.cs.utm.my/syed/images/files/jjdecode/jjdecode.html
|
||||
#
|
||||
# +NCR/CRC! [ReVeRsEr] - crackinglandia@gmail.com
|
||||
# Thanks to Jose Miguel Esparza (@EternalTodo) for the final push to make it work!
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
class JJDecoder(object):
|
||||
|
||||
def __init__(self, jj_encoded_data):
|
||||
self.encoded_str = jj_encoded_data
|
||||
|
||||
|
||||
def clean(self):
|
||||
return re.sub('^\s+|\s+$', '', self.encoded_str)
|
||||
|
||||
|
||||
def checkPalindrome(self, Str):
|
||||
startpos = -1
|
||||
endpos = -1
|
||||
gv, gvl = -1, -1
|
||||
|
||||
index = Str.find('"\'\\"+\'+",')
|
||||
|
||||
if index == 0:
|
||||
startpos = Str.find('$$+"\\""+') + 8
|
||||
endpos = Str.find('"\\"")())()')
|
||||
gv = Str[Str.find('"\'\\"+\'+",')+9:Str.find('=~[]')]
|
||||
gvl = len(gv)
|
||||
else:
|
||||
gv = Str[0:Str.find('=')]
|
||||
gvl = len(gv)
|
||||
startpos = Str.find('"\\""+') + 5
|
||||
endpos = Str.find('"\\"")())()')
|
||||
|
||||
return (startpos, endpos, gv, gvl)
|
||||
|
||||
|
||||
def decode(self):
|
||||
|
||||
self.encoded_str = self.clean()
|
||||
startpos, endpos, gv, gvl = self.checkPalindrome(self.encoded_str)
|
||||
|
||||
if startpos == endpos:
|
||||
raise Exception('No data!')
|
||||
|
||||
data = self.encoded_str[startpos:endpos]
|
||||
|
||||
b = ['___+', '__$+', '_$_+', '_$$+', '$__+', '$_$+', '$$_+', '$$$+', '$___+', '$__$+', '$_$_+', '$_$$+', '$$__+', '$$_$+', '$$$_+', '$$$$+']
|
||||
|
||||
str_l = '(![]+"")[' + gv + '._$_]+'
|
||||
str_o = gv + '._$+'
|
||||
str_t = gv + '.__+'
|
||||
str_u = gv + '._+'
|
||||
|
||||
str_hex = gv + '.'
|
||||
|
||||
str_s = '"'
|
||||
gvsig = gv + '.'
|
||||
|
||||
str_quote = '\\\\\\"'
|
||||
str_slash = '\\\\\\\\'
|
||||
|
||||
str_lower = '\\\\"+'
|
||||
str_upper = '\\\\"+' + gv + '._+'
|
||||
|
||||
str_end = '"+'
|
||||
|
||||
out = ''
|
||||
while data != '':
|
||||
# l o t u
|
||||
if data.find(str_l) == 0:
|
||||
data = data[len(str_l):]
|
||||
out += 'l'
|
||||
continue
|
||||
elif data.find(str_o) == 0:
|
||||
data = data[len(str_o):]
|
||||
out += 'o'
|
||||
continue
|
||||
elif data.find(str_t) == 0:
|
||||
data = data[len(str_t):]
|
||||
out += 't'
|
||||
continue
|
||||
elif data.find(str_u) == 0:
|
||||
data = data[len(str_u):]
|
||||
out += 'u'
|
||||
continue
|
||||
|
||||
# 0123456789abcdef
|
||||
if data.find(str_hex) == 0:
|
||||
data = data[len(str_hex):]
|
||||
|
||||
for i in range(len(b)):
|
||||
if data.find(b[i]) == 0:
|
||||
data = data[len(b[i]):]
|
||||
out += '%x' % i
|
||||
break
|
||||
continue
|
||||
|
||||
# start of s block
|
||||
if data.find(str_s) == 0:
|
||||
data = data[len(str_s):]
|
||||
|
||||
# check if "R
|
||||
if data.find(str_upper) == 0: # r4 n >= 128
|
||||
data = data[len(str_upper):] # skip sig
|
||||
ch_str = ''
|
||||
for i in range(2): # shouldn't be more than 2 hex chars
|
||||
# gv + "."+b[ c ]
|
||||
if data.find(gvsig) == 0:
|
||||
data = data[len(gvsig):]
|
||||
for k in range(len(b)): # for every entry in b
|
||||
if data.find(b[k]) == 0:
|
||||
data = data[len(b[k]):]
|
||||
ch_str = '%x' % k
|
||||
break
|
||||
else:
|
||||
break
|
||||
|
||||
out += chr(int(ch_str, 16))
|
||||
continue
|
||||
|
||||
elif data.find(str_lower) == 0: # r3 check if "R // n < 128
|
||||
data = data[len(str_lower):] # skip sig
|
||||
|
||||
ch_str = ''
|
||||
ch_lotux = ''
|
||||
temp = ''
|
||||
b_checkR1 = 0
|
||||
for j in range(3): # shouldn't be more than 3 octal chars
|
||||
if j > 1: # lotu check
|
||||
if data.find(str_l) == 0:
|
||||
data = data[len(str_l):]
|
||||
ch_lotux = 'l'
|
||||
break
|
||||
elif data.find(str_o) == 0:
|
||||
data = data[len(str_o):]
|
||||
ch_lotux = 'o'
|
||||
break
|
||||
elif data.find(str_t) == 0:
|
||||
data = data[len(str_t):]
|
||||
ch_lotux = 't'
|
||||
break
|
||||
elif data.find(str_u) == 0:
|
||||
data = data[len(str_u):]
|
||||
ch_lotux = 'u'
|
||||
break
|
||||
|
||||
# gv + "."+b[ c ]
|
||||
if data.find(gvsig) == 0:
|
||||
temp = data[len(gvsig):]
|
||||
for k in range(8): # for every entry in b octal
|
||||
if temp.find(b[k]) == 0:
|
||||
if int(ch_str + str(k), 8) > 128:
|
||||
b_checkR1 = 1
|
||||
break
|
||||
|
||||
ch_str += str(k)
|
||||
data = data[len(gvsig):] # skip gvsig
|
||||
data = data[len(b[k]):]
|
||||
break
|
||||
|
||||
if b_checkR1 == 1:
|
||||
if data.find(str_hex) == 0: # 0123456789abcdef
|
||||
data = data[len(str_hex):]
|
||||
# check every element of hex decode string for a match
|
||||
for i in range(len(b)):
|
||||
if data.find(b[i]) == 0:
|
||||
data = data[len(b[i]):]
|
||||
ch_lotux = '%x' % i
|
||||
break
|
||||
break
|
||||
else:
|
||||
break
|
||||
|
||||
out += chr(int(ch_str,8)) + ch_lotux
|
||||
continue
|
||||
|
||||
else: # "S ----> "SR or "S+
|
||||
# if there is, loop s until R 0r +
|
||||
# if there is no matching s block, throw error
|
||||
|
||||
match = 0;
|
||||
n = None
|
||||
|
||||
# searching for matching pure s block
|
||||
while True:
|
||||
n = ord(data[0])
|
||||
if data.find(str_quote) == 0:
|
||||
data = data[len(str_quote):]
|
||||
out += '"'
|
||||
match += 1
|
||||
continue
|
||||
elif data.find(str_slash) == 0:
|
||||
data = data[len(str_slash):]
|
||||
out += '\\'
|
||||
match += 1
|
||||
continue
|
||||
elif data.find(str_end) == 0: # reached end off S block ? +
|
||||
if match == 0:
|
||||
raise '+ no match S block: ' + data
|
||||
data = data[len(str_end):]
|
||||
break # step out of the while loop
|
||||
elif data.find(str_upper) == 0: # r4 reached end off S block ? - check if "R n >= 128
|
||||
if match == 0:
|
||||
raise 'no match S block n>128: ' + data
|
||||
data = data[len(str_upper):] # skip sig
|
||||
|
||||
ch_str = ''
|
||||
ch_lotux = ''
|
||||
|
||||
for j in range(10): # shouldn't be more than 10 hex chars
|
||||
if j > 1: # lotu check
|
||||
if data.find(str_l) == 0:
|
||||
data = data[len(str_l):]
|
||||
ch_lotux = 'l'
|
||||
break
|
||||
elif data.find(str_o) == 0:
|
||||
data = data[len(str_o):]
|
||||
ch_lotux = 'o'
|
||||
break
|
||||
elif data.find(str_t) == 0:
|
||||
data = data[len(str_t):]
|
||||
ch_lotux = 't'
|
||||
break
|
||||
elif data.find(str_u) == 0:
|
||||
data = data[len(str_u):]
|
||||
ch_lotux = 'u'
|
||||
break
|
||||
|
||||
# gv + "."+b[ c ]
|
||||
if data.find(gvsig) == 0:
|
||||
data = data[len(gvsig):] # skip gvsig
|
||||
for k in range(len(b)): # for every entry in b
|
||||
if data.find(b[k]) == 0:
|
||||
data = data[len(b[k]):]
|
||||
ch_str += '%x' % k
|
||||
break
|
||||
else:
|
||||
break # done
|
||||
out += chr(int(ch_str, 16))
|
||||
break # step out of the while loop
|
||||
elif data.find(str_lower) == 0: # r3 check if "R // n < 128
|
||||
if match == 0:
|
||||
raise 'no match S block n<128: ' + data
|
||||
|
||||
data = data[len(str_lower):] # skip sig
|
||||
|
||||
ch_str = ''
|
||||
ch_lotux = ''
|
||||
temp = ''
|
||||
b_checkR1 = 0
|
||||
|
||||
for j in range(3): # shouldn't be more than 3 octal chars
|
||||
if j > 1: # lotu check
|
||||
if data.find(str_l) == 0:
|
||||
data = data[len(str_l):]
|
||||
ch_lotux = 'l'
|
||||
break
|
||||
elif data.find(str_o) == 0:
|
||||
data = data[len(str_o):]
|
||||
ch_lotux = 'o'
|
||||
break
|
||||
elif data.find(str_t) == 0:
|
||||
data = data[len(str_t):]
|
||||
ch_lotux = 't'
|
||||
break
|
||||
elif data.find(str_u) == 0:
|
||||
data = data[len(str_u):]
|
||||
ch_lotux = 'u'
|
||||
break
|
||||
|
||||
# gv + "."+b[ c ]
|
||||
if data.find(gvsig) == 0:
|
||||
temp = data[len(gvsig):]
|
||||
for k in range(8): # for every entry in b octal
|
||||
if temp.find(b[k]) == 0:
|
||||
if int(ch_str + str(k), 8) > 128:
|
||||
b_checkR1 = 1
|
||||
break
|
||||
|
||||
ch_str += str(k)
|
||||
data = data[len(gvsig):] # skip gvsig
|
||||
data = data[len(b[k]):]
|
||||
break
|
||||
|
||||
if b_checkR1 == 1:
|
||||
if data.find(str_hex) == 0: # 0123456789abcdef
|
||||
data = data[len(str_hex):]
|
||||
# check every element of hex decode string for a match
|
||||
for i in range(len(b)):
|
||||
if data.find(b[i]) == 0:
|
||||
data = data[len(b[i]):]
|
||||
ch_lotux = '%x' % i
|
||||
break
|
||||
else:
|
||||
break
|
||||
out += chr(int(ch_str, 8)) + ch_lotux
|
||||
break # step out of the while loop
|
||||
elif (0x21 <= n and n <= 0x2f) or (0x3A <= n and n <= 0x40) or ( 0x5b <= n and n <= 0x60 ) or ( 0x7b <= n and n <= 0x7f ):
|
||||
out += data[0]
|
||||
data = data[1:]
|
||||
match += 1
|
||||
continue
|
||||
print 'No match : ' + data
|
||||
break
|
||||
return out
|
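A hedged usage sketch for the JJDecoder class above; the import path follows this file's location and the encoded sample is a placeholder (real jjencode payloads are much longer).

# Hedged usage sketch for lib/jjdecode.py above.
from lib.jjdecode import JJDecoder     # import path assumed

with open('payload.js') as f:          # some jjencode-obfuscated script (placeholder)
    encoded = f.read()
decoded = JJDecoder(encoded).decode()  # returns the original javascript source
print(decoded)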
||||
@@ -5,6 +5,7 @@ import re
|
||||
from .translators.friendly_nodes import REGEXP_CONVERTER
|
||||
from .utils.injector import fix_js_args
|
||||
from types import FunctionType, ModuleType, GeneratorType, BuiltinFunctionType, MethodType, BuiltinMethodType
|
||||
from math import floor, log10
|
||||
import traceback
|
||||
try:
|
||||
import numpy
|
||||
@@ -603,15 +604,7 @@ class PyJs(object):
|
||||
elif typ == 'Boolean':
|
||||
return Js('true') if self.value else Js('false')
|
||||
elif typ == 'Number': #or self.Class=='Number':
|
||||
if self.is_nan():
|
||||
return Js('NaN')
|
||||
elif self.is_infinity():
|
||||
sign = '-' if self.value < 0 else ''
|
||||
return Js(sign + 'Infinity')
|
||||
elif isinstance(self.value,
|
||||
long) or self.value.is_integer(): # dont print .0
|
||||
return Js(unicode(int(self.value)))
|
||||
return Js(unicode(self.value)) # accurate enough
|
||||
return Js(unicode(js_dtoa(self.value)))
|
||||
elif typ == 'String':
|
||||
return self
|
||||
else: #object
|
||||
@@ -1046,7 +1039,7 @@ def PyJsComma(a, b):
|
||||
return b
|
||||
|
||||
|
||||
from .internals.simplex import JsException as PyJsException
|
||||
from .internals.simplex import JsException as PyJsException, js_dtoa
|
||||
import pyjsparser
|
||||
pyjsparser.parser.ENABLE_JS2PY_ERRORS = lambda msg: MakeError('SyntaxError', msg)
|
||||
|
||||
|
||||
@@ -116,36 +116,52 @@ def eval_js(js):
|
||||
|
||||
|
||||
def eval_js6(js):
|
||||
"""Just like eval_js but with experimental support for js6 via babel."""
|
||||
return eval_js(js6_to_js5(js))
|
||||
|
||||
|
||||
def translate_js6(js):
|
||||
"""Just like translate_js but with experimental support for js6 via babel."""
|
||||
return translate_js(js6_to_js5(js))
|
||||
|
||||
|
||||
class EvalJs(object):
|
||||
"""This class supports continuous execution of javascript under same context.
|
||||
|
||||
>>> js = EvalJs()
|
||||
>>> js.execute('var a = 10;function f(x) {return x*x};')
|
||||
>>> js.f(9)
|
||||
>>> ctx = EvalJs()
|
||||
>>> ctx.execute('var a = 10;function f(x) {return x*x};')
|
||||
>>> ctx.f(9)
|
||||
81
|
||||
>>> js.a
|
||||
>>> ctx.a
|
||||
10
|
||||
|
||||
context is a python dict or object that contains python variables that should be available to JavaScript
|
||||
For example:
|
||||
>>> js = EvalJs({'a': 30})
|
||||
>>> js.execute('var x = a')
|
||||
>>> js.x
|
||||
>>> ctx = EvalJs({'a': 30})
|
||||
>>> ctx.execute('var x = a')
|
||||
>>> ctx.x
|
||||
30
|
||||
|
||||
You can enable JS require function via enable_require. With this feature enabled you can use js modules
|
||||
from npm, for example:
|
||||
>>> ctx = EvalJs(enable_require=True)
|
||||
>>> ctx.execute("var esprima = require('esprima');")
|
||||
>>> ctx.execute("esprima.parse('var a = 1')")
|
||||
|
||||
You can run interactive javascript console with console method!"""
|
||||
|
||||
def __init__(self, context={}):
|
||||
def __init__(self, context={}, enable_require=False):
|
||||
self.__dict__['_context'] = {}
|
||||
exec (DEFAULT_HEADER, self._context)
|
||||
self.__dict__['_var'] = self._context['var'].to_python()
|
||||
|
||||
if enable_require:
|
||||
def _js_require_impl(npm_module_name):
|
||||
from .node_import import require
|
||||
from .base import to_python
|
||||
return require(to_python(npm_module_name), context=self._context)
|
||||
setattr(self._var, 'require', _js_require_impl)
|
||||
|
||||
if not isinstance(context, dict):
|
||||
try:
|
||||
context = context.__dict__
|
||||
|
||||
@@ -7,7 +7,7 @@ from ..byte_trans import ByteCodeGenerator, Code
|
||||
|
||||
def Function(this, args):
|
||||
# convert arguments to python list of strings
|
||||
a = map(to_string, tuple(args))
|
||||
a = list(map(to_string, tuple(args)))
|
||||
_body = u';'
|
||||
_args = ()
|
||||
if len(a):
|
||||
@@ -42,6 +42,7 @@ def executable_code(code_str, space, global_context=True):
|
||||
space.byte_generator.emit('LABEL', skip)
|
||||
space.byte_generator.emit('NOP')
|
||||
space.byte_generator.restore_state()
|
||||
|
||||
space.byte_generator.exe.compile(
|
||||
start_loc=old_tape_len
|
||||
) # dont read the code from the beginning, dont be stupid!
|
||||
@@ -71,5 +72,5 @@ def _eval(this, args):
|
||||
|
||||
|
||||
def log(this, args):
|
||||
print ' '.join(map(to_string, args))
|
||||
print(' '.join(map(to_string, args)))
|
||||
return undefined
|
||||
|
||||
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
from ..conversions import *
|
||||
from ..func_utils import *
|
||||
from jsregexp import RegExpExec
|
||||
from .jsregexp import RegExpExec
|
||||
|
||||
DIGS = set(u'0123456789')
|
||||
WHITE = u"\u0009\u000A\u000B\u000C\u000D\u0020\u00A0\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
__all__ = ['require']
|
||||
import subprocess, os, codecs, glob
|
||||
from .evaljs import translate_js
|
||||
from .evaljs import translate_js, DEFAULT_HEADER
|
||||
import six
|
||||
DID_INIT = False
|
||||
DIRNAME = os.path.dirname(os.path.abspath(__file__))
|
||||
@@ -15,7 +15,7 @@ def _init():
|
||||
'node -v', shell=True, cwd=DIRNAME
|
||||
) == 0, 'You must have node installed! run: brew install node'
|
||||
assert subprocess.call(
|
||||
'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify'
|
||||
'cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify browserify-shim'
|
||||
% repr(DIRNAME),
|
||||
shell=True,
|
||||
cwd=DIRNAME) == 0, 'Could not link required node_modules'
|
||||
@@ -46,12 +46,18 @@ GET_FROM_GLOBALS_FUNC = '''
|
||||
|
||||
'''
|
||||
|
||||
def _get_module_py_name(module_name):
|
||||
return module_name.replace('-', '_')
|
||||
|
||||
def require(module_name, include_polyfill=False, update=False):
|
||||
def _get_module_var_name(module_name):
|
||||
return _get_module_py_name(module_name).rpartition('/')[-1]
|
||||
|
||||
|
||||
def _get_and_translate_npm_module(module_name, include_polyfill=False, update=False):
|
||||
assert isinstance(module_name, str), 'module_name must be a string!'
|
||||
py_name = module_name.replace('-', '_')
|
||||
py_name = _get_module_py_name(module_name)
|
||||
module_filename = '%s.py' % py_name
|
||||
var_name = py_name.rpartition('/')[-1]
|
||||
var_name = _get_module_var_name(module_name)
|
||||
if not os.path.exists(os.path.join(PY_NODE_MODULES_PATH,
|
||||
module_filename)) or update:
|
||||
_init()
|
||||
@@ -77,7 +83,7 @@ def require(module_name, include_polyfill=False, update=False):
|
||||
|
||||
# convert the module
|
||||
assert subprocess.call(
|
||||
'''node -e "(require('browserify')('./%s').bundle(function (err,data) {fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"'''
|
||||
'''node -e "(require('browserify')('./%s').bundle(function (err,data) {if (err) {console.log(err);throw new Error(err);};fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"'''
|
||||
% (in_file_name, out_file_name),
|
||||
shell=True,
|
||||
cwd=DIRNAME,
|
||||
@@ -88,7 +94,8 @@ def require(module_name, include_polyfill=False, update=False):
|
||||
"utf-8") as f:
|
||||
js_code = f.read()
|
||||
os.remove(os.path.join(DIRNAME, out_file_name))
|
||||
|
||||
if len(js_code) < 50:
|
||||
raise RuntimeError("Candidate JS bundle too short - likely browserify issue.")
|
||||
js_code += GET_FROM_GLOBALS_FUNC
|
||||
js_code += ';var %s = getFromGlobals(%s);%s' % (
|
||||
var_name, repr(module_name), var_name)
|
||||
@@ -107,7 +114,32 @@ def require(module_name, include_polyfill=False, update=False):
|
||||
os.path.join(PY_NODE_MODULES_PATH, module_filename), "r",
|
||||
"utf-8") as f:
|
||||
py_code = f.read()
|
||||
return py_code
|
||||
|
||||
context = {}
|
||||
|
||||
def require(module_name, include_polyfill=False, update=False, context=None):
|
||||
"""
|
||||
Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and
|
||||
finally translates the generated JS bundle to Python via Js2Py.
|
||||
Returns a pure python object that behaves like the installed module. Nice!
|
||||
|
||||
:param module_name: Name of the npm module to require. For example 'esprima'.
|
||||
:param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed
|
||||
for some modules that use unsupported features.
|
||||
:param update: Whether to force update the translation. Otherwise uses a cached version if exists.
|
||||
:param context: Optional context in which the translated module should be executed in. If provided, the
|
||||
header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports.
|
||||
:return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object.
|
||||
"""
|
||||
py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=update)
|
||||
# this is a bit hacky but we need to strip the default header from the generated code...
|
||||
if context is not None:
|
||||
if not py_code.startswith(DEFAULT_HEADER):
|
||||
# new header version? retranslate...
|
||||
assert not update, "Unexpected header."
|
||||
py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=True)
|
||||
assert py_code.startswith(DEFAULT_HEADER), "Unexpected header."
|
||||
py_code = py_code[len(DEFAULT_HEADER):]
|
||||
context = {} if context is None else context
|
||||
exec (py_code, context)
|
||||
return context['var'][var_name].to_py()
|
||||
return context['var'][_get_module_var_name(module_name)].to_py()
|
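A short hedged usage sketch of the require() API documented above; 'esprima' is only an example module, and node/npm must be installed for the _init() assertions to pass.

# Hedged usage sketch for js2py's require().
from js2py.node_import import require    # import path assumed

esprima = require('esprima')             # installs, bundles and translates the npm module on first use
ast = esprima.parse('var a = 1')         # the returned wrapper behaves like the JS module
print(ast)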
||||
|
||||
@@ -6,8 +6,6 @@ if six.PY3:
|
||||
xrange = range
|
||||
unicode = str
|
||||
|
||||
# todo fix apply and bind
|
||||
|
||||
|
||||
class FunctionPrototype:
|
||||
def toString():
|
||||
@@ -41,6 +39,7 @@ class FunctionPrototype:
|
||||
return this.call(obj, args)
|
||||
|
||||
def bind(thisArg):
|
||||
arguments_ = arguments
|
||||
target = this
|
||||
if not target.is_callable():
|
||||
raise this.MakeError(
|
||||
@@ -48,5 +47,5 @@ class FunctionPrototype:
|
||||
if len(arguments) <= 1:
|
||||
args = ()
|
||||
else:
|
||||
args = tuple([arguments[e] for e in xrange(1, len(arguments))])
|
||||
args = tuple([arguments_[e] for e in xrange(1, len(arguments_))])
|
||||
return this.PyJsBoundFunction(target, thisArg, args)
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
from internals import byte_trans
|
||||
from internals import seval
|
||||
import pyjsparser
|
||||
|
||||
x = r'''
|
||||
function g() {var h123 = 11; return [function g1() {return h123}, new Function('return h123')]}
|
||||
g()[1]()
|
||||
'''
|
||||
print seval.eval_js_vm(x)
|
||||
@@ -155,7 +155,7 @@ def limited(func):
|
||||
inf = float('inf')
|
||||
|
||||
|
||||
def Literal(type, value, raw, regex=None, comments=None):
|
||||
def Literal(type, value, raw, regex=None):
|
||||
if regex: # regex
|
||||
return 'JsRegExp(%s)' % repr(compose_regex(value))
|
||||
elif value is None: # null
|
||||
@@ -165,12 +165,12 @@ def Literal(type, value, raw, regex=None, comments=None):
|
||||
return 'Js(%s)' % repr(value) if value != inf else 'Js(float("inf"))'
|
||||
|
||||
|
||||
def Identifier(type, name, comments=None):
|
||||
def Identifier(type, name):
|
||||
return 'var.get(%s)' % repr(name)
|
||||
|
||||
|
||||
@limited
|
||||
def MemberExpression(type, computed, object, property, comments=None):
|
||||
def MemberExpression(type, computed, object, property):
|
||||
far_left = trans(object)
|
||||
if computed: # obj[prop] type accessor
|
||||
# may be literal which is the same in every case so we can save some time on conversion
|
||||
@@ -183,12 +183,12 @@ def MemberExpression(type, computed, object, property, comments=None):
|
||||
return far_left + '.get(%s)' % prop
|
||||
|
||||
|
||||
def ThisExpression(type, comments=None):
|
||||
def ThisExpression(type):
|
||||
return 'var.get(u"this")'
|
||||
|
||||
|
||||
@limited
|
||||
def CallExpression(type, callee, arguments, comments=None):
|
||||
def CallExpression(type, callee, arguments):
|
||||
arguments = [trans(e) for e in arguments]
|
||||
if callee['type'] == 'MemberExpression':
|
||||
far_left = trans(callee['object'])
|
||||
@@ -210,38 +210,47 @@ def CallExpression(type, callee, arguments, comments=None):
|
||||
# ========== ARRAYS ============
|
||||
|
||||
|
||||
def ArrayExpression(type, elements, comments=None): # todo fix null inside problem
|
||||
def ArrayExpression(type, elements): # todo fix null inside problem
|
||||
return 'Js([%s])' % ', '.join(trans(e) if e else 'None' for e in elements)
|
||||
|
||||
|
||||
# ========== OBJECTS =============
|
||||
|
||||
|
||||
def ObjectExpression(type, properties, comments=None):
|
||||
name = inline_stack.require('Object')
|
||||
def ObjectExpression(type, properties):
|
||||
name = None
|
||||
elems = []
|
||||
after = ''
|
||||
for p in properties:
|
||||
if p['kind'] == 'init':
|
||||
elems.append('%s:%s' % Property(**p))
|
||||
elif p['kind'] == 'set':
|
||||
k, setter = Property(
|
||||
**p
|
||||
) # setter is just a lval referring to that function, it will be defined in InlineStack automatically
|
||||
after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % (
|
||||
name, k, setter)
|
||||
elif p['kind'] == 'get':
|
||||
k, getter = Property(**p)
|
||||
after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % (
|
||||
name, k, getter)
|
||||
else:
|
||||
raise RuntimeError('Unexpected object propery kind')
|
||||
obj = '%s = Js({%s})\n' % (name, ','.join(elems))
|
||||
inline_stack.define(name, obj + after)
|
||||
return name
|
||||
if name is None:
|
||||
name = inline_stack.require('Object')
|
||||
if p['kind'] == 'set':
|
||||
k, setter = Property(
|
||||
**p
|
||||
) # setter is just a lval referring to that function, it will be defined in InlineStack automatically
|
||||
after += '%s.define_own_property(%s, {"set":%s, "configurable":True, "enumerable":True})\n' % (
|
||||
name, k, setter)
|
||||
elif p['kind'] == 'get':
|
||||
k, getter = Property(**p)
|
||||
after += '%s.define_own_property(%s, {"get":%s, "configurable":True, "enumerable":True})\n' % (
|
||||
name, k, getter)
|
||||
else:
|
||||
raise RuntimeError('Unexpected object propery kind')
|
||||
definition = 'Js({%s})' % ','.join(elems)
|
||||
if name is None:
|
||||
return definition
|
||||
body = '%s = %s\n' % (name, definition)
|
||||
body += after
|
||||
body += 'return %s\n' % name
|
||||
code = 'def %s():\n%s' % (name, indent(body))
|
||||
inline_stack.define(name, code)
|
||||
return name + '()'
|
||||
|
||||
|
||||
def Property(type, kind, key, computed, value, method, shorthand, comments=None):
|
||||
def Property(type, kind, key, computed, value, method, shorthand):
|
||||
if shorthand or computed:
|
||||
raise NotImplementedError(
|
||||
'Shorthand and Computed properties not implemented!')
|
||||
@@ -256,7 +265,7 @@ def Property(type, kind, key, computed, value, method, shorthand, comments=None)
|
||||
|
||||
|
||||
@limited
|
||||
def UnaryExpression(type, operator, argument, prefix, comments=None):
|
||||
def UnaryExpression(type, operator, argument, prefix):
|
||||
a = trans(
|
||||
argument, standard=True
|
||||
) # unary involve some complex operations so we cant use line shorteners here
|
||||
@@ -271,7 +280,7 @@ def UnaryExpression(type, operator, argument, prefix, comments=None):
|
||||
|
||||
|
||||
@limited
|
||||
def BinaryExpression(type, operator, left, right, comments=None):
|
||||
def BinaryExpression(type, operator, left, right):
|
||||
a = trans(left)
|
||||
b = trans(right)
|
||||
# delegate to our friends
|
||||
@@ -279,7 +288,7 @@ def BinaryExpression(type, operator, left, right, comments=None):
|
||||
|
||||
|
||||
@limited
|
||||
def UpdateExpression(type, operator, argument, prefix, comments=None):
|
||||
def UpdateExpression(type, operator, argument, prefix):
|
||||
a = trans(
|
||||
argument, standard=True
|
||||
) # also complex operation involving parsing of the result so no line length reducing here
|
||||
@@ -287,7 +296,7 @@ def UpdateExpression(type, operator, argument, prefix, comments=None):
|
||||
|
||||
|
||||
@limited
|
||||
def AssignmentExpression(type, operator, left, right, comments=None):
|
||||
def AssignmentExpression(type, operator, left, right):
|
||||
operator = operator[:-1]
|
||||
if left['type'] == 'Identifier':
|
||||
if operator:
|
||||
@@ -319,12 +328,12 @@ six
|
||||
|
||||
|
||||
@limited
|
||||
def SequenceExpression(type, expressions, comments=None):
|
||||
def SequenceExpression(type, expressions):
|
||||
return reduce(js_comma, (trans(e) for e in expressions))
|
||||
|
||||
|
||||
@limited
|
||||
def NewExpression(type, callee, arguments, comments=None):
|
||||
def NewExpression(type, callee, arguments):
|
||||
return trans(callee) + '.create(%s)' % ', '.join(
|
||||
trans(e) for e in arguments)
|
||||
|
||||
@@ -332,7 +341,7 @@ def NewExpression(type, callee, arguments, comments=None):
|
||||
@limited
|
||||
def ConditionalExpression(
|
||||
type, test, consequent,
|
||||
alternate, comments=None): # caused plenty of problems in my home-made translator :)
|
||||
alternate): # caused plenty of problems in my home-made translator :)
|
||||
return '(%s if %s else %s)' % (trans(consequent), trans(test),
|
||||
trans(alternate))
|
||||
|
||||
@@ -340,49 +349,49 @@ def ConditionalExpression(
|
||||
# =========== STATEMENTS =============
|
||||
|
||||
|
||||
def BlockStatement(type, body, comments=None):
|
||||
def BlockStatement(type, body):
|
||||
return StatementList(
|
||||
body) # never returns empty string! In the worst case returns pass\n
|
||||
|
||||
|
||||
def ExpressionStatement(type, expression, comments=None):
|
||||
def ExpressionStatement(type, expression):
|
||||
return trans(expression) + '\n' # end expression space with new line
|
||||
|
||||
|
||||
def BreakStatement(type, label, comments=None):
|
||||
def BreakStatement(type, label):
|
||||
if label:
|
||||
return 'raise %s("Breaked")\n' % (get_break_label(label['name']))
|
||||
else:
|
||||
return 'break\n'
|
||||
|
||||
|
||||
def ContinueStatement(type, label, comments=None):
|
||||
def ContinueStatement(type, label):
|
||||
if label:
|
||||
return 'raise %s("Continued")\n' % (get_continue_label(label['name']))
|
||||
else:
|
||||
return 'continue\n'
|
||||
|
||||
|
||||
def ReturnStatement(type, argument, comments=None):
|
||||
def ReturnStatement(type, argument):
|
||||
return 'return %s\n' % (trans(argument)
|
||||
if argument else "var.get('undefined')")
|
||||
|
||||
|
||||
def EmptyStatement(type, comments=None):
|
||||
def EmptyStatement(type):
|
||||
return 'pass\n'
|
||||
|
||||
|
||||
def DebuggerStatement(type, comments=None):
|
||||
def DebuggerStatement(type):
|
||||
return 'pass\n'
|
||||
|
||||
|
||||
def DoWhileStatement(type, body, test, comments=None):
|
||||
def DoWhileStatement(type, body, test):
|
||||
inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n')
|
||||
result = 'while 1:\n' + indent(inside)
|
||||
return result
|
||||
|
||||
|
||||
def ForStatement(type, init, test, update, body, comments=None):
|
||||
def ForStatement(type, init, test, update, body):
|
||||
update = indent(trans(update)) if update else ''
|
||||
init = trans(init) if init else ''
|
||||
if not init.endswith('\n'):
|
||||
@@ -398,7 +407,7 @@ def ForStatement(type, init, test, update, body, comments=None):
|
||||
return result
|
||||
|
||||
|
||||
def ForInStatement(type, left, right, body, each, comments=None):
|
||||
def ForInStatement(type, left, right, body, each):
|
||||
res = 'for PyJsTemp in %s:\n' % trans(right)
|
||||
if left['type'] == "VariableDeclaration":
|
||||
addon = trans(left) # make sure variable is registered
|
||||
@@ -417,7 +426,7 @@ def ForInStatement(type, left, right, body, each, comments=None):
|
||||
return res
|
||||
|
||||
|
||||
def IfStatement(type, test, consequent, alternate, comments=None):
|
||||
def IfStatement(type, test, consequent, alternate):
|
||||
# NOTE we cannot do elif because function definition inside elif statement would not be possible!
|
||||
IF = 'if %s:\n' % trans(test)
|
||||
IF += indent(trans(consequent))
|
||||
@@ -427,7 +436,7 @@ def IfStatement(type, test, consequent, alternate, comments=None):
|
||||
return IF + ELSE
|
||||
|
||||
|
||||
def LabeledStatement(type, label, body, comments=None):
|
||||
def LabeledStatement(type, label, body):
|
||||
# todo consider using smarter approach!
|
||||
inside = trans(body)
|
||||
defs = ''
|
||||
@@ -448,7 +457,7 @@ def LabeledStatement(type, label, body, comments=None):
|
||||
return defs + inside
|
||||
|
||||
|
||||
def StatementList(lis, comments=None):
|
||||
def StatementList(lis):
|
||||
if lis: # ensure we don't return empty string because it may ruin indentation!
|
||||
code = ''.join(trans(e) for e in lis)
|
||||
return code if code else 'pass\n'
|
||||
@@ -456,7 +465,7 @@ def StatementList(lis, comments=None):
|
||||
return 'pass\n'
|
||||
|
||||
|
||||
def PyimportStatement(type, imp, comments=None):
|
||||
def PyimportStatement(type, imp):
|
||||
lib = imp['name']
|
||||
jlib = 'PyImport_%s' % lib
|
||||
code = 'import %s as %s\n' % (lib, jlib)
|
||||
@@ -471,7 +480,7 @@ def PyimportStatement(type, imp, comments=None):
|
||||
return code
|
||||
|
||||
|
||||
def SwitchStatement(type, discriminant, cases, comments=None):
|
||||
def SwitchStatement(type, discriminant, cases):
|
||||
#TODO there will be a problem with continue in a switch statement.... FIX IT
|
||||
code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n')
|
||||
code = code % trans(discriminant)
|
||||
@@ -491,12 +500,12 @@ def SwitchStatement(type, discriminant, cases, comments=None):
|
||||
return code
|
||||
|
||||
|
||||
def ThrowStatement(type, argument, comments=None):
|
||||
def ThrowStatement(type, argument):
|
||||
return 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % trans(
|
||||
argument)
|
||||
|
||||
|
||||
def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer, comments=None):
|
||||
def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer):
|
||||
result = 'try:\n%s' % indent(trans(block))
|
||||
# complicated catch statement...
|
||||
if handler:
|
||||
@@ -516,13 +525,13 @@ def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer, com
|
||||
return result
|
||||
|
||||
|
||||
def LexicalDeclaration(type, declarations, kind, comments=None):
|
||||
def LexicalDeclaration(type, declarations, kind):
|
||||
raise NotImplementedError(
|
||||
'let and const not implemented yet but they will be soon! Check github for updates.'
|
||||
)
|
||||
|
||||
|
||||
def VariableDeclarator(type, id, init, comments=None):
|
||||
def VariableDeclarator(type, id, init):
|
||||
name = id['name']
|
||||
# register the name if not already registered
|
||||
Context.register(name)
|
||||
@@ -531,21 +540,21 @@ def VariableDeclarator(type, id, init, comments=None):
|
||||
return ''
|
||||
|
||||
|
||||
def VariableDeclaration(type, declarations, kind, comments=None):
|
||||
def VariableDeclaration(type, declarations, kind):
|
||||
code = ''.join(trans(d) for d in declarations)
|
||||
return code if code else 'pass\n'
|
||||
|
||||
|
||||
def WhileStatement(type, test, body, comments=None):
|
||||
def WhileStatement(type, test, body):
|
||||
result = 'while %s:\n' % trans(test) + indent(trans(body))
|
||||
return result
|
||||
|
||||
|
||||
def WithStatement(type, object, body, comments=None):
|
||||
def WithStatement(type, object, body):
|
||||
raise NotImplementedError('With statement not implemented!')
|
||||
|
||||
|
||||
def Program(type, body, comments=None):
|
||||
def Program(type, body):
|
||||
inline_stack.reset()
|
||||
code = ''.join(trans(e) for e in body)
|
||||
# here add hoisted elements (register variables and define functions)
|
||||
@@ -559,7 +568,7 @@ def Program(type, body, comments=None):
|
||||
|
||||
|
||||
def FunctionDeclaration(type, id, params, defaults, body, generator,
|
||||
expression, comments=None):
|
||||
expression):
|
||||
if generator:
|
||||
raise NotImplementedError('Generators not supported')
|
||||
if defaults:
|
||||
@@ -610,7 +619,7 @@ def FunctionDeclaration(type, id, params, defaults, body, generator,
|
||||
|
||||
|
||||
def FunctionExpression(type, id, params, defaults, body, generator,
|
||||
expression, comments=None):
|
||||
expression):
|
||||
if generator:
|
||||
raise NotImplementedError('Generators not supported')
|
||||
if defaults:
|
||||
|
||||
@@ -115,7 +115,16 @@ def append_arguments(code_obj, new_locals):
|
||||
code_obj.co_freevars, code_obj.co_cellvars)
|
||||
|
||||
# Done modifying codestring - make the code object
|
||||
return types.CodeType(*args)
|
||||
if hasattr(code_obj, "replace"):
|
||||
# Python 3.8+
|
||||
return code_obj.replace(
|
||||
co_argcount=co_argcount + new_locals_len,
|
||||
co_nlocals=code_obj.co_nlocals + new_locals_len,
|
||||
co_code=code,
|
||||
co_names=names,
|
||||
co_varnames=varnames)
|
||||
else:
|
||||
return types.CodeType(*args)
|
||||
|
||||
|
||||
def instructions(code_obj):
|
||||
|
||||
83
lib/jsc.py
@@ -1,83 +0,0 @@
|
||||
MAPPING = {
    'a': '(false+"")[1]',
    'b': '([]["entries"]()+"")[2]',
    'c': '([]["fill"]+"")[3]',
    'd': '(undefined+"")[2]',
    'e': '(true+"")[3]',
    'f': '(false+"")[0]',
    'g': '(false+[0]+String)[20]',
    'h': '(+(101))["to"+String["name"]](21)[1]',
    'i': '([false]+undefined)[10]',
    'j': '([]["entries"]()+"")[3]',
    'k': '(+(20))["to"+String["name"]](21)',
    'l': '(false+"")[2]',
    'm': '(Number+"")[11]',
    'n': '(undefined+"")[1]',
    'o': '(true+[]["fill"])[10]',
    'p': '(+(211))["to"+String["name"]](31)[1]',
    'q': '(+(212))["to"+String["name"]](31)[1]',
    'r': '(true+"")[1]',
    's': '(false+"")[3]',
    't': '(true+"")[0]',
    'u': '(undefined+"")[0]',
    'v': '(+(31))["to"+String["name"]](32)',
    'w': '(+(32))["to"+String["name"]](33)',
    'x': '(+(101))["to"+String["name"]](34)[1]',
    'y': '(NaN+[Infinity])[10]',
    'z': '(+(35))["to"+String["name"]](36)',
    'A': '(+[]+Array)[10]',
    'B': '(+[]+Boolean)[10]',
    'C': 'Function("return escape")()(("")["italics"]())[2]',
    'D': 'Function("return escape")()([]["fill"])["slice"]("-1")',
    'E': '(RegExp+"")[12]',
    'F': '(+[]+Function)[10]',
    'G': '(false+Function("return Date")()())[30]',
    'I': '(Infinity+"")[0]',
    'M': '(true+Function("return Date")()())[30]',
    'N': '(NaN+"")[0]',
    'O': '(NaN+Function("return{}")())[11]',
    'R': '(+[]+RegExp)[10]',
    'S': '(+[]+String)[10]',
    'T': '(NaN+Function("return Date")()())[30]',
    'U': '(NaN+Function("return{}")()["to"+String["name"]]["call"]())[11]',
    ' ': '(NaN+[]["fill"])[11]',
    '"': '("")["fontcolor"]()[12]',
    '%': 'Function("return escape")()([]["fill"])[21]',
    '&': '("")["link"](0+")[10]',
    '(': '(undefined+[]["fill"])[22]',
    ')': '([0]+false+[]["fill"])[20]',
    '+': '(+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]])+[])[2]',
    ',': '([]["slice"]["call"](false+"")+"")[1]',
    '-': '(+(.+[0000000001])+"")[2]',
    '.': '(+(+!+[]+[+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[!+[]+!+[]]+[+[]])+[])[+!+[]]',
    '/': '(false+[0])["italics"]()[10]',
    ':': '(RegExp()+"")[3]',
    ';': '("")["link"](")[14]',
    '<': '("")["italics"]()[0]',
    '=': '("")["fontcolor"]()[11]',
    '>': '("")["italics"]()[2]',
    '?': '(RegExp()+"")[2]',
    '[': '([]["entries"]()+"")[0]',
    ']': '([]["entries"]()+"")[22]',
    '{': '(true+[]["fill"])[20]',
    '}': '([]["fill"]+"")["slice"]("-1")'
}

SIMPLE = {
    'false': '![]',
    'true': '!![]',
    'undefined': '[][[]]',
    'NaN': '+[![]]',
    'Infinity': '+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])'  # +"1e1000"
}

def jsunc(jscString):

    for key in sorted(MAPPING, key=lambda k: len(MAPPING[k]), reverse=True):
        if MAPPING.get(key) in jscString:
            jscString = jscString.replace(MAPPING.get(key), '"{}"'.format(key))

    for key in sorted(SIMPLE, key=lambda k: len(SIMPLE[k]), reverse=True):
        if SIMPLE.get(key) in jscString:
            jscString = jscString.replace(SIMPLE.get(key), '{}'.format(key))
    return jscString
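
A minimal sketch of what the removed jsunc() helper above does: it maps JSFuck-style sub-expressions back to readable tokens (the import path is an assumption).

    from lib import jsc  # hypothetical import path for the deleted module

    obfuscated = '(false+"")[1]+(undefined+"")[2]+![]'
    # MAPPING rewrites the two bracket expressions to "a" and "d",
    # then SIMPLE turns the trailing ![] into the literal false.
    print(jsc.jsunc(obfuscated))  # -> "a"+"d"+false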

550  lib/jscrypto.py
@@ -1,550 +0,0 @@
# -*- coding: utf-8 -*-
|
||||
|
||||
import StringIO
|
||||
import binascii
|
||||
import hashlib
|
||||
from array import array
|
||||
|
||||
|
||||
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
|
||||
target_key_size = key_size + iv_size
|
||||
derived_bytes = ""
|
||||
number_of_derived_words = 0
|
||||
block = None
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
while number_of_derived_words < target_key_size:
|
||||
if block is not None:
|
||||
hasher.update(block)
|
||||
|
||||
hasher.update(passwd)
|
||||
hasher.update(salt)
|
||||
block = hasher.digest()
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
|
||||
for i in range(1, iterations):
|
||||
hasher.update(block)
|
||||
block = hasher.digest()
|
||||
hasher = hashlib.new(hash_algorithm)
|
||||
|
||||
derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
|
||||
|
||||
number_of_derived_words += len(block) / 4
|
||||
|
||||
return {
|
||||
"key": derived_bytes[0: key_size * 4],
|
||||
"iv": derived_bytes[key_size * 4:]
|
||||
}
|
||||
|
||||
|
||||
class PKCS7Encoder(object):
|
||||
'''
|
||||
RFC 2315: PKCS#7 page 21
|
||||
Some content-encryption algorithms assume the
|
||||
input length is a multiple of k octets, where k > 1, and
|
||||
let the application define a method for handling inputs
|
||||
whose lengths are not a multiple of k octets. For such
|
||||
algorithms, the method shall be to pad the input at the
|
||||
trailing end with k - (l mod k) octets all having value k -
|
||||
(l mod k), where l is the length of the input. In other
|
||||
words, the input is padded at the trailing end with one of
|
||||
the following strings:
|
||||
|
||||
01 -- if l mod k = k-1
|
||||
02 02 -- if l mod k = k-2
|
||||
.
|
||||
.
|
||||
.
|
||||
k k ... k k -- if l mod k = 0
|
||||
|
||||
The padding can be removed unambiguously since all input is
|
||||
padded and no padding string is a suffix of another. This
|
||||
padding method is well-defined if and only if k < 256;
|
||||
methods for larger k are an open issue for further study.
|
||||
'''
|
||||
|
||||
def __init__(self, k=16):
|
||||
self.k = k
|
||||
|
||||
## @param text The padded text for which the padding is to be removed.
|
||||
# @exception ValueError Raised when the input padding is missing or corrupt.
|
||||
def decode(self, text):
|
||||
'''
|
||||
Remove the PKCS#7 padding from a text string
|
||||
'''
|
||||
nl = len(text)
|
||||
val = int(binascii.hexlify(text[-1]), 16)
|
||||
if val > self.k:
|
||||
raise ValueError('Input is not padded or padding is corrupt')
|
||||
|
||||
l = nl - val
|
||||
return text[:l]
|
||||
|
||||
## @param text The text to encode.
|
||||
def encode(self, text):
|
||||
'''
|
||||
Pad an input string according to PKCS#7
|
||||
'''
|
||||
l = len(text)
|
||||
output = StringIO.StringIO()
|
||||
val = self.k - (l % self.k)
|
||||
for _ in xrange(val):
|
||||
output.write('%02x' % val)
|
||||
return text + binascii.unhexlify(output.getvalue())
|
||||
|
||||
|
||||
# Pyaes file
|
||||
# Globals mandated by PEP 272:
|
||||
# http://www.python.org/dev/peps/pep-0272/
|
||||
MODE_ECB = 1
|
||||
MODE_CBC = 2
|
||||
# MODE_CTR = 6
|
||||
|
||||
block_size = 16
|
||||
key_size = None
|
||||
|
||||
|
||||
def new(key, mode, IV=None):
|
||||
if mode == MODE_ECB:
|
||||
return ECBMode(AES(key))
|
||||
elif mode == MODE_CBC:
|
||||
if IV is None:
|
||||
raise ValueError, "CBC mode needs an IV value!"
|
||||
|
||||
return CBCMode(AES(key), IV)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
#### AES cipher implementation
|
||||
|
||||
class AES(object):
|
||||
block_size = 16
|
||||
|
||||
def __init__(self, key):
|
||||
self.setkey(key)
|
||||
|
||||
def setkey(self, key):
|
||||
"""Sets the key and performs key expansion."""
|
||||
|
||||
self.key = key
|
||||
self.key_size = len(key)
|
||||
|
||||
if self.key_size == 16:
|
||||
self.rounds = 10
|
||||
elif self.key_size == 24:
|
||||
self.rounds = 12
|
||||
elif self.key_size == 32:
|
||||
self.rounds = 14
|
||||
else:
|
||||
raise ValueError, "Key length must be 16, 24 or 32 bytes"
|
||||
|
||||
self.expand_key()
|
||||
|
||||
def expand_key(self):
|
||||
"""Performs AES key expansion on self.key and stores in self.exkey"""
|
||||
|
||||
# The key schedule specifies how parts of the key are fed into the
|
||||
# cipher's round functions. "Key expansion" means performing this
|
||||
# schedule in advance. Almost all implementations do this.
|
||||
#
|
||||
# Here's a description of AES key schedule:
|
||||
# http://en.wikipedia.org/wiki/Rijndael_key_schedule
|
||||
|
||||
# The expanded key starts with the actual key itself
|
||||
exkey = array('B', self.key)
|
||||
|
||||
# extra key expansion steps
|
||||
if self.key_size == 16:
|
||||
extra_cnt = 0
|
||||
elif self.key_size == 24:
|
||||
extra_cnt = 2
|
||||
else:
|
||||
extra_cnt = 3
|
||||
|
||||
# 4-byte temporary variable for key expansion
|
||||
word = exkey[-4:]
|
||||
# Each expansion cycle uses 'i' once for Rcon table lookup
|
||||
for i in xrange(1, 11):
|
||||
|
||||
#### key schedule core:
|
||||
# left-rotate by 1 byte
|
||||
word = word[1:4] + word[0:1]
|
||||
|
||||
# apply S-box to all bytes
|
||||
for j in xrange(4):
|
||||
word[j] = aes_sbox[word[j]]
|
||||
|
||||
# apply the Rcon table to the leftmost byte
|
||||
word[0] = word[0] ^ aes_Rcon[i]
|
||||
#### end key schedule core
|
||||
|
||||
for z in xrange(4):
|
||||
for j in xrange(4):
|
||||
# mix in bytes from the last subkey
|
||||
word[j] ^= exkey[-self.key_size + j]
|
||||
exkey.extend(word)
|
||||
|
||||
# Last key expansion cycle always finishes here
|
||||
if len(exkey) >= (self.rounds + 1) * self.block_size:
|
||||
break
|
||||
|
||||
# Special substitution step for 256-bit key
|
||||
if self.key_size == 32:
|
||||
for j in xrange(4):
|
||||
# mix in bytes from the last subkey XORed with S-box of
|
||||
# current word bytes
|
||||
word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
|
||||
exkey.extend(word)
|
||||
|
||||
# Twice for 192-bit key, thrice for 256-bit key
|
||||
for z in xrange(extra_cnt):
|
||||
for j in xrange(4):
|
||||
# mix in bytes from the last subkey
|
||||
word[j] ^= exkey[-self.key_size + j]
|
||||
exkey.extend(word)
|
||||
|
||||
self.exkey = exkey
|
||||
|
||||
def add_round_key(self, block, round):
|
||||
"""AddRoundKey step in AES. This is where the key is mixed into plaintext"""
|
||||
|
||||
offset = round * 16
|
||||
exkey = self.exkey
|
||||
|
||||
for i in xrange(16):
|
||||
block[i] ^= exkey[offset + i]
|
||||
|
||||
# print 'AddRoundKey:', block
|
||||
|
||||
def sub_bytes(self, block, sbox):
|
||||
"""SubBytes step, apply S-box to all bytes
|
||||
|
||||
Depending on whether encrypting or decrypting, a different sbox array
|
||||
is passed in.
|
||||
"""
|
||||
|
||||
for i in xrange(16):
|
||||
block[i] = sbox[block[i]]
|
||||
|
||||
# print 'SubBytes :', block
|
||||
|
||||
def shift_rows(self, b):
|
||||
"""ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
|
||||
|
||||
Since we're performing this on a transposed matrix, cells are numbered
|
||||
from top to bottom::
|
||||
|
||||
0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change
|
||||
1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around)
|
||||
2 6 10 14 -> 10 14 2 6 -- shifted by 2
|
||||
3 7 11 15 -> 15 3 7 11 -- shifted by 3
|
||||
"""
|
||||
|
||||
b[1], b[5], b[9], b[13] = b[5], b[9], b[13], b[1]
|
||||
b[2], b[6], b[10], b[14] = b[10], b[14], b[2], b[6]
|
||||
b[3], b[7], b[11], b[15] = b[15], b[3], b[7], b[11]
|
||||
|
||||
# print 'ShiftRows :', b
|
||||
|
||||
def shift_rows_inv(self, b):
|
||||
"""Similar to shift_rows above, but performed in inverse for decryption."""
|
||||
|
||||
b[5], b[9], b[13], b[1] = b[1], b[5], b[9], b[13]
|
||||
b[10], b[14], b[2], b[6] = b[2], b[6], b[10], b[14]
|
||||
b[15], b[3], b[7], b[11] = b[3], b[7], b[11], b[15]
|
||||
|
||||
# print 'ShiftRows :', b
|
||||
|
||||
def mix_columns(self, block):
|
||||
"""MixColumns step. Mixes the values in each column"""
|
||||
|
||||
# Cache global multiplication tables (see below)
|
||||
mul_by_2 = gf_mul_by_2
|
||||
mul_by_3 = gf_mul_by_3
|
||||
|
||||
# Since we're dealing with a transposed matrix, columns are already
|
||||
# sequential
|
||||
for i in xrange(4):
|
||||
col = i * 4
|
||||
|
||||
# v0, v1, v2, v3 = block[col : col+4]
|
||||
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
|
||||
block[col + 3])
|
||||
|
||||
block[col] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
|
||||
block[col + 1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
|
||||
block[col + 2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
|
||||
block[col + 3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
|
||||
|
||||
# print 'MixColumns :', block
|
||||
|
||||
def mix_columns_inv(self, block):
|
||||
"""Similar to mix_columns above, but performed in inverse for decryption."""
|
||||
|
||||
# Cache global multiplication tables (see below)
|
||||
mul_9 = gf_mul_by_9
|
||||
mul_11 = gf_mul_by_11
|
||||
mul_13 = gf_mul_by_13
|
||||
mul_14 = gf_mul_by_14
|
||||
|
||||
# Since we're dealing with a transposed matrix, columns are already
|
||||
# sequential
|
||||
for i in xrange(4):
|
||||
col = i * 4
|
||||
|
||||
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
|
||||
block[col + 3])
|
||||
# v0, v1, v2, v3 = block[col:col+4]
|
||||
|
||||
block[col] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
|
||||
block[col + 1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
|
||||
block[col + 2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
|
||||
block[col + 3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
|
||||
|
||||
# print 'MixColumns :', block
|
||||
|
||||
def encrypt_block(self, block):
|
||||
"""Encrypts a single block. This is the main AES function"""
|
||||
|
||||
# For efficiency reasons, the state between steps is transmitted via a
|
||||
# mutable array, not returned.
|
||||
self.add_round_key(block, 0)
|
||||
|
||||
for round in xrange(1, self.rounds):
|
||||
self.sub_bytes(block, aes_sbox)
|
||||
self.shift_rows(block)
|
||||
self.mix_columns(block)
|
||||
self.add_round_key(block, round)
|
||||
|
||||
self.sub_bytes(block, aes_sbox)
|
||||
self.shift_rows(block)
|
||||
# no mix_columns step in the last round
|
||||
self.add_round_key(block, self.rounds)
|
||||
|
||||
def decrypt_block(self, block):
|
||||
"""Decrypts a single block. This is the main AES decryption function"""
|
||||
|
||||
# For efficiency reasons, the state between steps is transmitted via a
|
||||
# mutable array, not returned.
|
||||
self.add_round_key(block, self.rounds)
|
||||
|
||||
# count rounds down from 15 ... 1
|
||||
for round in xrange(self.rounds - 1, 0, -1):
|
||||
self.shift_rows_inv(block)
|
||||
self.sub_bytes(block, aes_inv_sbox)
|
||||
self.add_round_key(block, round)
|
||||
self.mix_columns_inv(block)
|
||||
|
||||
self.shift_rows_inv(block)
|
||||
self.sub_bytes(block, aes_inv_sbox)
|
||||
self.add_round_key(block, 0)
|
||||
# no mix_columns step in the last round
|
||||
|
||||
|
||||
#### ECB mode implementation
|
||||
|
||||
class ECBMode(object):
|
||||
"""Electronic CodeBook (ECB) mode encryption.
|
||||
|
||||
Basically this mode applies the cipher function to each block individually;
|
||||
no feedback is done. NB! This is insecure for almost all purposes
|
||||
"""
|
||||
|
||||
def __init__(self, cipher):
|
||||
self.cipher = cipher
|
||||
self.block_size = cipher.block_size
|
||||
|
||||
def ecb(self, data, block_func):
|
||||
"""Perform ECB mode with the given function"""
|
||||
|
||||
if len(data) % self.block_size != 0:
|
||||
raise ValueError, "Plaintext length must be multiple of 16"
|
||||
|
||||
block_size = self.block_size
|
||||
data = array('B', data)
|
||||
|
||||
for offset in xrange(0, len(data), block_size):
|
||||
block = data[offset: offset + block_size]
|
||||
block_func(block)
|
||||
data[offset: offset + block_size] = block
|
||||
|
||||
return data.tostring()
|
||||
|
||||
def encrypt(self, data):
|
||||
"""Encrypt data in ECB mode"""
|
||||
|
||||
return self.ecb(data, self.cipher.encrypt_block)
|
||||
|
||||
def decrypt(self, data):
|
||||
"""Decrypt data in ECB mode"""
|
||||
|
||||
return self.ecb(data, self.cipher.decrypt_block)
|
||||
|
||||
|
||||
#### CBC mode
|
||||
|
||||
class CBCMode(object):
|
||||
"""Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks.
|
||||
|
||||
In CBC encryption, each plaintext block is XORed with the ciphertext block
|
||||
preceding it; decryption is simply the inverse.
|
||||
"""
|
||||
|
||||
# A better explanation of CBC can be found here:
|
||||
# http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29
|
||||
|
||||
def __init__(self, cipher, IV):
|
||||
self.cipher = cipher
|
||||
self.block_size = cipher.block_size
|
||||
self.IV = array('B', IV)
|
||||
|
||||
def encrypt(self, data):
|
||||
"""Encrypt data in CBC mode"""
|
||||
|
||||
block_size = self.block_size
|
||||
if len(data) % block_size != 0:
|
||||
raise ValueError, "Plaintext length must be multiple of 16"
|
||||
|
||||
data = array('B', data)
|
||||
IV = self.IV
|
||||
|
||||
for offset in xrange(0, len(data), block_size):
|
||||
block = data[offset: offset + block_size]
|
||||
|
||||
# Perform CBC chaining
|
||||
for i in xrange(block_size):
|
||||
block[i] ^= IV[i]
|
||||
|
||||
self.cipher.encrypt_block(block)
|
||||
data[offset: offset + block_size] = block
|
||||
IV = block
|
||||
|
||||
self.IV = IV
|
||||
return data.tostring()
|
||||
|
||||
def decrypt(self, data):
|
||||
"""Decrypt data in CBC mode"""
|
||||
|
||||
block_size = self.block_size
|
||||
if len(data) % block_size != 0:
|
||||
raise ValueError, "Ciphertext length must be multiple of 16"
|
||||
|
||||
data = array('B', data)
|
||||
IV = self.IV
|
||||
|
||||
for offset in xrange(0, len(data), block_size):
|
||||
ctext = data[offset: offset + block_size]
|
||||
block = ctext[:]
|
||||
self.cipher.decrypt_block(block)
|
||||
|
||||
# Perform CBC chaining
|
||||
# for i in xrange(block_size):
|
||||
# data[offset + i] ^= IV[i]
|
||||
for i in xrange(block_size):
|
||||
block[i] ^= IV[i]
|
||||
data[offset: offset + block_size] = block
|
||||
|
||||
IV = ctext
|
||||
# data[offset : offset+block_size] = block
|
||||
|
||||
self.IV = IV
|
||||
return data.tostring()
|
||||
|
||||
|
||||
####
|
||||
|
||||
def galois_multiply(a, b):
|
||||
"""Galois Field multiplicaiton for AES"""
|
||||
p = 0
|
||||
while b:
|
||||
if b & 1:
|
||||
p ^= a
|
||||
a <<= 1
|
||||
if a & 0x100:
|
||||
a ^= 0x1b
|
||||
b >>= 1
|
||||
|
||||
return p & 0xff
|
||||
|
||||
|
||||
# Precompute the multiplication tables for encryption
|
||||
gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)])
|
||||
gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)])
|
||||
# ... for decryption
|
||||
gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)])
|
||||
gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
|
||||
gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
|
||||
gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
|
||||
|
||||
####
|
||||
|
||||
# The S-box is a 256-element array, that maps a single byte value to another
|
||||
# byte value. Since it's designed to be reversible, each value occurs only once
|
||||
# in the S-box
|
||||
#
|
||||
# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
|
||||
|
||||
aes_sbox = array('B',
|
||||
'637c777bf26b6fc53001672bfed7ab76'
|
||||
'ca82c97dfa5947f0add4a2af9ca472c0'
|
||||
'b7fd9326363ff7cc34a5e5f171d83115'
|
||||
'04c723c31896059a071280e2eb27b275'
|
||||
'09832c1a1b6e5aa0523bd6b329e32f84'
|
||||
'53d100ed20fcb15b6acbbe394a4c58cf'
|
||||
'd0efaafb434d338545f9027f503c9fa8'
|
||||
'51a3408f929d38f5bcb6da2110fff3d2'
|
||||
'cd0c13ec5f974417c4a77e3d645d1973'
|
||||
'60814fdc222a908846eeb814de5e0bdb'
|
||||
'e0323a0a4906245cc2d3ac629195e479'
|
||||
'e7c8376d8dd54ea96c56f4ea657aae08'
|
||||
'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
|
||||
'703eb5664803f60e613557b986c11d9e'
|
||||
'e1f8981169d98e949b1e87e9ce5528df'
|
||||
'8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
|
||||
)
|
||||
|
||||
# This is the inverse of the above. In other words:
|
||||
# aes_inv_sbox[aes_sbox[val]] == val
|
||||
|
||||
aes_inv_sbox = array('B',
|
||||
'52096ad53036a538bf40a39e81f3d7fb'
|
||||
'7ce339829b2fff87348e4344c4dee9cb'
|
||||
'547b9432a6c2233dee4c950b42fac34e'
|
||||
'082ea16628d924b2765ba2496d8bd125'
|
||||
'72f8f66486689816d4a45ccc5d65b692'
|
||||
'6c704850fdedb9da5e154657a78d9d84'
|
||||
'90d8ab008cbcd30af7e45805b8b34506'
|
||||
'd02c1e8fca3f0f02c1afbd0301138a6b'
|
||||
'3a9111414f67dcea97f2cfcef0b4e673'
|
||||
'96ac7422e7ad3585e2f937e81c75df6e'
|
||||
'47f11a711d29c5896fb7620eaa18be1b'
|
||||
'fc563e4bc6d279209adbc0fe78cd5af4'
|
||||
'1fdda8338807c731b11210592780ec5f'
|
||||
'60517fa919b54a0d2de57a9f93c99cef'
|
||||
'a0e03b4dae2af5b0c8ebbb3c83539961'
|
||||
'172b047eba77d626e169146355210c7d'.decode('hex')
|
||||
)
|
||||
|
||||
# The Rcon table is used in AES's key schedule (key expansion)
|
||||
# It's a pre-computed table of exponentation of 2 in AES's finite field
|
||||
#
|
||||
# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
|
||||
|
||||
aes_Rcon = array('B',
|
||||
'8d01020408102040801b366cd8ab4d9a'
|
||||
'2f5ebc63c697356ad4b37dfaefc59139'
|
||||
'72e4d3bd61c29f254a943366cc831d3a'
|
||||
'74e8cb8d01020408102040801b366cd8'
|
||||
'ab4d9a2f5ebc63c697356ad4b37dfaef'
|
||||
'c5913972e4d3bd61c29f254a943366cc'
|
||||
'831d3a74e8cb8d01020408102040801b'
|
||||
'366cd8ab4d9a2f5ebc63c697356ad4b3'
|
||||
'7dfaefc5913972e4d3bd61c29f254a94'
|
||||
'3366cc831d3a74e8cb8d010204081020'
|
||||
'40801b366cd8ab4d9a2f5ebc63c69735'
|
||||
'6ad4b37dfaefc5913972e4d3bd61c29f'
|
||||
'254a943366cc831d3a74e8cb8d010204'
|
||||
'08102040801b366cd8ab4d9a2f5ebc63'
|
||||
'c697356ad4b37dfaefc5913972e4d3bd'
|
||||
'61c29f254a943366cc831d3a74e8cb'.decode('hex')
|
||||
)
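
A minimal Python 2 sketch of how the pieces of the removed lib/jscrypto.py fit together: evpKDF() stretches a passphrase and salt into an AES-256 key plus IV, PKCS7Encoder pads the plaintext, and new() wires the pure-Python AES into CBC mode (import path assumed).

    from lib import jscrypto  # hypothetical import path for the deleted module

    kdf = jscrypto.evpKDF("passw0rd", "8bytesea")   # MD5-based EVP_BytesToKey derivation
    key, iv = kdf["key"], kdf["iv"]                 # 32-byte key, 16-byte IV

    padder = jscrypto.PKCS7Encoder()
    padded = padder.encode("some secret payload")   # pad up to a 16-byte multiple

    ciphertext = jscrypto.new(key, jscrypto.MODE_CBC, iv).encrypt(padded)
    recovered = jscrypto.new(key, jscrypto.MODE_CBC, iv).decrypt(ciphertext)
    print(padder.decode(recovered))                 # -> some secret payload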

@@ -1,249 +0,0 @@
# -*- coding: utf-8 -*-
|
||||
|
||||
import json
|
||||
import operator
|
||||
import re
|
||||
|
||||
|
||||
_OPERATORS = [
|
||||
('|', operator.or_),
|
||||
('^', operator.xor),
|
||||
('&', operator.and_),
|
||||
('>>', operator.rshift),
|
||||
('<<', operator.lshift),
|
||||
('-', operator.sub),
|
||||
('+', operator.add),
|
||||
('%', operator.mod),
|
||||
('/', operator.truediv),
|
||||
('*', operator.mul),
|
||||
]
|
||||
|
||||
_ASSIGN_OPERATORS = []
|
||||
for op, opfunc in _OPERATORS:
|
||||
_ASSIGN_OPERATORS.append([op + '=', opfunc])
|
||||
_ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
|
||||
|
||||
_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
|
||||
|
||||
|
||||
class JSInterpreter(object):
|
||||
def __init__(self, code, objects=None):
|
||||
if objects is None:
|
||||
objects = {}
|
||||
self.code = code
|
||||
self._functions = {}
|
||||
self._objects = objects
|
||||
|
||||
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
|
||||
|
||||
should_abort = False
|
||||
stmt = stmt.lstrip()
|
||||
stmt_m = re.match(r'var\s', stmt)
|
||||
if stmt_m:
|
||||
expr = stmt[len(stmt_m.group(0)):]
|
||||
else:
|
||||
return_m = re.match(r'return(?:\s+|$)', stmt)
|
||||
if return_m:
|
||||
expr = stmt[len(return_m.group(0)):]
|
||||
should_abort = True
|
||||
else:
|
||||
# Try interpreting it as an expression
|
||||
expr = stmt
|
||||
|
||||
v = self.interpret_expression(expr, local_vars, allow_recursion)
|
||||
return v, should_abort
|
||||
|
||||
def interpret_expression(self, expr, local_vars, allow_recursion):
|
||||
expr = expr.strip()
|
||||
|
||||
if expr == '': # Empty expression
|
||||
return None
|
||||
|
||||
if expr.startswith('('):
|
||||
parens_count = 0
|
||||
for m in re.finditer(r'[()]', expr):
|
||||
if m.group(0) == '(':
|
||||
parens_count += 1
|
||||
else:
|
||||
parens_count -= 1
|
||||
if parens_count == 0:
|
||||
sub_expr = expr[1:m.start()]
|
||||
sub_result = self.interpret_expression(
|
||||
sub_expr, local_vars, allow_recursion)
|
||||
remaining_expr = expr[m.end():].strip()
|
||||
if not remaining_expr:
|
||||
return sub_result
|
||||
else:
|
||||
expr = json.dumps(sub_result) + remaining_expr
|
||||
break
|
||||
|
||||
for op, opfunc in _ASSIGN_OPERATORS:
|
||||
m = re.match(r'''(?x)
|
||||
(?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?
|
||||
\s*%s
|
||||
(?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr)
|
||||
if not m:
|
||||
continue
|
||||
right_val = self.interpret_expression(
|
||||
m.group('expr'), local_vars, allow_recursion - 1)
|
||||
|
||||
if m.groupdict().get('index'):
|
||||
lvar = local_vars[m.group('out')]
|
||||
idx = self.interpret_expression(
|
||||
m.group('index'), local_vars, allow_recursion)
|
||||
assert isinstance(idx, int)
|
||||
cur = lvar[idx]
|
||||
val = opfunc(cur, right_val)
|
||||
lvar[idx] = val
|
||||
return val
|
||||
else:
|
||||
cur = local_vars.get(m.group('out'))
|
||||
val = opfunc(cur, right_val)
|
||||
local_vars[m.group('out')] = val
|
||||
return val
|
||||
|
||||
if expr.isdigit():
|
||||
return int(expr)
|
||||
|
||||
var_m = re.match(
|
||||
r'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE,
|
||||
expr)
|
||||
if var_m:
|
||||
return local_vars[var_m.group('name')]
|
||||
|
||||
try:
|
||||
return json.loads(expr)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
m = re.match(
|
||||
r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % _NAME_RE,
|
||||
expr)
|
||||
if m:
|
||||
variable = m.group('var')
|
||||
member = m.group('member')
|
||||
arg_str = m.group('args')
|
||||
|
||||
if variable in local_vars:
|
||||
obj = local_vars[variable]
|
||||
else:
|
||||
if variable not in self._objects:
|
||||
self._objects[variable] = self.extract_object(variable)
|
||||
obj = self._objects[variable]
|
||||
|
||||
if arg_str is None:
|
||||
# Member access
|
||||
if member == 'length':
|
||||
return len(obj)
|
||||
return obj[member]
|
||||
|
||||
assert expr.endswith(')')
|
||||
# Function call
|
||||
if arg_str == '':
|
||||
argvals = tuple()
|
||||
else:
|
||||
argvals = []
|
||||
for v in arg_str.split(','):
|
||||
argvals.extend([self.interpret_expression(v, local_vars, allow_recursion)])
|
||||
|
||||
if member == 'split':
|
||||
assert argvals == ('',)
|
||||
return list(obj)
|
||||
if member == 'join':
|
||||
assert len(argvals) == 1
|
||||
return argvals[0].join(obj)
|
||||
if member == 'reverse':
|
||||
assert len(argvals) == 0
|
||||
obj.reverse()
|
||||
return obj
|
||||
if member == 'slice':
|
||||
assert len(argvals) == 1
|
||||
return obj[argvals[0]:]
|
||||
if member == 'splice':
|
||||
assert isinstance(obj, list)
|
||||
index, howMany = argvals
|
||||
res = []
|
||||
for i in range(index, min(index + howMany, len(obj))):
|
||||
res.append(obj.pop(index))
|
||||
return res
|
||||
|
||||
return obj[member](argvals)
|
||||
|
||||
m = re.match(
|
||||
r'(?P<in>%s)\[(?P<idx>.+)\]$' % _NAME_RE, expr)
|
||||
if m:
|
||||
val = local_vars[m.group('in')]
|
||||
idx = self.interpret_expression(
|
||||
m.group('idx'), local_vars, allow_recursion - 1)
|
||||
return val[idx]
|
||||
|
||||
for op, opfunc in _OPERATORS:
|
||||
m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
|
||||
if not m:
|
||||
continue
|
||||
x, abort = self.interpret_statement(
|
||||
m.group('x'), local_vars, allow_recursion - 1)
|
||||
y, abort = self.interpret_statement(
|
||||
m.group('y'), local_vars, allow_recursion - 1)
|
||||
return opfunc(x, y)
|
||||
|
||||
m = re.match(
|
||||
r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr)
|
||||
if m:
|
||||
fname = m.group('func')
|
||||
argvals = []
|
||||
for v in m.group('args').split(','):
|
||||
if v.isdigit():
|
||||
argvals.append([int(v)])
|
||||
else:
|
||||
argvals.append([local_vars[v]])
|
||||
|
||||
if fname not in self._functions:
|
||||
self._functions[fname] = self.extract_function(fname)
|
||||
return self._functions[fname](argvals)
|
||||
|
||||
|
||||
def extract_object(self, objname):
|
||||
obj = {}
|
||||
obj_m = re.search(
|
||||
(r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
|
||||
r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' +
|
||||
r'\}\s*;',
|
||||
self.code)
|
||||
fields = obj_m.group('fields')
|
||||
# Currently, it only supports function definitions
|
||||
fields_m = re.finditer(
|
||||
r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function'
|
||||
r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
|
||||
fields)
|
||||
for f in fields_m:
|
||||
argnames = f.group('args').split(',')
|
||||
obj[f.group('key')] = self.build_function(argnames, f.group('code'))
|
||||
|
||||
return obj
|
||||
|
||||
def extract_function(self, funcname):
|
||||
func_m = re.search(
|
||||
r'''(?x)
|
||||
(?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
|
||||
\((?P<args>[^)]*)\)\s*
|
||||
\{(?P<code>[^}]+)\}''' % (
|
||||
re.escape(funcname), re.escape(funcname), re.escape(funcname)),
|
||||
self.code)
|
||||
argnames = func_m.group('args').split(',')
|
||||
|
||||
return self.build_function(argnames, func_m.group('code'))
|
||||
|
||||
def call_function(self, funcname, *args):
|
||||
f = self.extract_function(funcname)
|
||||
return f(args)
|
||||
|
||||
def build_function(self, argnames, code):
|
||||
def resf(args):
|
||||
local_vars = dict(zip(argnames, args))
|
||||
for stmt in code.split(';'):
|
||||
res, abort = self.interpret_statement(stmt, local_vars)
|
||||
if abort:
|
||||
break
|
||||
return res
|
||||
return resf
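
A minimal sketch of driving the removed interpreter above: call_function() locates a named function in the JavaScript source and evaluates it with Python arguments.

    js_code = 'function mix(a,b){var c=a+b;return c*2}'
    interp = JSInterpreter(js_code)
    print(interp.call_function('mix', 3, 4))  # -> 14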

@@ -1,15 +0,0 @@
from .pafy import get_playlist
from .pafy import new
from .pafy import set_api_key
from .pafy import dump_cache
from .pafy import load_cache
from .pafy import get_categoryname
from .pafy import __version__
from .pafy import __author__
from .pafy import __license__
import sys

if "test" not in sys.argv[0]:
    del pafy

del sys
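
For context, the vendored pafy package touched by this commit is normally driven like this (the video URL is a placeholder):

    import pafy  # the bundled lib/pafy package

    video = pafy.new("https://www.youtube.com/watch?v=XXXXXXXXXXX")
    best = video.getbest()  # highest-quality combined audio/video stream
    print(video.title, best.extension, best.url)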

1618  lib/pafy/pafy.py
File diff suppressed because it is too large
@@ -1,4 +1,5 @@
__all__ = ['PyJsParser', 'parse', 'JsSyntaxError']
__all__ = ['PyJsParser', 'parse', 'JsSyntaxError', 'pyjsparserdata']
__author__ = 'Piotr Dabkowski'
__version__ = '2.2.0'
from .parser import PyJsParser, parse, JsSyntaxError
from .parser import PyJsParser, parse, JsSyntaxError
from . import pyjsparserdata

File diff suppressed because it is too large
@@ -23,7 +23,7 @@ import sys
import unicodedata
|
||||
from collections import defaultdict
|
||||
|
||||
PY3 = sys.version_info >= (3,0)
|
||||
PY3 = sys.version_info >= (3, 0)
|
||||
|
||||
if PY3:
|
||||
unichr = chr
|
||||
@@ -31,179 +31,254 @@ if PY3:
|
||||
unicode = str
|
||||
|
||||
token = {
|
||||
'BooleanLiteral': 1,
|
||||
'EOF': 2,
|
||||
'Identifier': 3,
|
||||
'Keyword': 4,
|
||||
'NullLiteral': 5,
|
||||
'NumericLiteral': 6,
|
||||
'Punctuator': 7,
|
||||
'StringLiteral': 8,
|
||||
'RegularExpression': 9,
|
||||
'Template': 10
|
||||
}
|
||||
'BooleanLiteral': 1,
|
||||
'EOF': 2,
|
||||
'Identifier': 3,
|
||||
'Keyword': 4,
|
||||
'NullLiteral': 5,
|
||||
'NumericLiteral': 6,
|
||||
'Punctuator': 7,
|
||||
'StringLiteral': 8,
|
||||
'RegularExpression': 9,
|
||||
'Template': 10
|
||||
}
|
||||
|
||||
TokenName = dict((v, k) for k, v in token.items())
|
||||
|
||||
TokenName = dict((v,k) for k,v in token.items())
|
||||
FnExprTokens = [
|
||||
'(',
|
||||
'{',
|
||||
'[',
|
||||
'in',
|
||||
'typeof',
|
||||
'instanceof',
|
||||
'new',
|
||||
'return',
|
||||
'case',
|
||||
'delete',
|
||||
'throw',
|
||||
'void',
|
||||
# assignment operators
|
||||
'=',
|
||||
'+=',
|
||||
'-=',
|
||||
'*=',
|
||||
'/=',
|
||||
'%=',
|
||||
'<<=',
|
||||
'>>=',
|
||||
'>>>=',
|
||||
'&=',
|
||||
'|=',
|
||||
'^=',
|
||||
',',
|
||||
# binary/unary operators
|
||||
'+',
|
||||
'-',
|
||||
'*',
|
||||
'/',
|
||||
'%',
|
||||
'++',
|
||||
'--',
|
||||
'<<',
|
||||
'>>',
|
||||
'>>>',
|
||||
'&',
|
||||
'|',
|
||||
'^',
|
||||
'!',
|
||||
'~',
|
||||
'&&',
|
||||
'||',
|
||||
'?',
|
||||
':',
|
||||
'===',
|
||||
'==',
|
||||
'>=',
|
||||
'<=',
|
||||
'<',
|
||||
'>',
|
||||
'!=',
|
||||
'!=='
|
||||
]
|
||||
|
||||
FnExprTokens = ['(', '{', '[', 'in', 'typeof', 'instanceof', 'new',
|
||||
'return', 'case', 'delete', 'throw', 'void',
|
||||
# assignment operators
|
||||
'=', '+=', '-=', '*=', '/=', '%=', '<<=', '>>=', '>>>=',
|
||||
'&=', '|=', '^=', ',',
|
||||
# binary/unary operators
|
||||
'+', '-', '*', '/', '%', '++', '--', '<<', '>>', '>>>', '&',
|
||||
'|', '^', '!', '~', '&&', '||', '?', ':', '===', '==', '>=',
|
||||
'<=', '<', '>', '!=', '!==']
|
||||
|
||||
syntax= set(('AssignmentExpression',
|
||||
'AssignmentPattern',
|
||||
'ArrayExpression',
|
||||
'ArrayPattern',
|
||||
'ArrowFunctionExpression',
|
||||
'BlockStatement',
|
||||
'BinaryExpression',
|
||||
'BreakStatement',
|
||||
'CallExpression',
|
||||
'CatchClause',
|
||||
'ClassBody',
|
||||
'ClassDeclaration',
|
||||
'ClassExpression',
|
||||
'ConditionalExpression',
|
||||
'ContinueStatement',
|
||||
'DoWhileStatement',
|
||||
'DebuggerStatement',
|
||||
'EmptyStatement',
|
||||
'ExportAllDeclaration',
|
||||
'ExportDefaultDeclaration',
|
||||
'ExportNamedDeclaration',
|
||||
'ExportSpecifier',
|
||||
'ExpressionStatement',
|
||||
'ForStatement',
|
||||
'ForInStatement',
|
||||
'FunctionDeclaration',
|
||||
'FunctionExpression',
|
||||
'Identifier',
|
||||
'IfStatement',
|
||||
'ImportDeclaration',
|
||||
'ImportDefaultSpecifier',
|
||||
'ImportNamespaceSpecifier',
|
||||
'ImportSpecifier',
|
||||
'Literal',
|
||||
'LabeledStatement',
|
||||
'LogicalExpression',
|
||||
'MemberExpression',
|
||||
'MethodDefinition',
|
||||
'NewExpression',
|
||||
'ObjectExpression',
|
||||
'ObjectPattern',
|
||||
'Program',
|
||||
'Property',
|
||||
'RestElement',
|
||||
'ReturnStatement',
|
||||
'SequenceExpression',
|
||||
'SpreadElement',
|
||||
'Super',
|
||||
'SwitchCase',
|
||||
'SwitchStatement',
|
||||
'TaggedTemplateExpression',
|
||||
'TemplateElement',
|
||||
'TemplateLiteral',
|
||||
'ThisExpression',
|
||||
'ThrowStatement',
|
||||
'TryStatement',
|
||||
'UnaryExpression',
|
||||
'UpdateExpression',
|
||||
'VariableDeclaration',
|
||||
'VariableDeclarator',
|
||||
'WhileStatement',
|
||||
'WithStatement'))
|
||||
syntax = set(
|
||||
('AssignmentExpression', 'AssignmentPattern', 'ArrayExpression',
|
||||
'ArrayPattern', 'ArrowFunctionExpression', 'BlockStatement',
|
||||
'BinaryExpression', 'BreakStatement', 'CallExpression', 'CatchClause',
|
||||
'ClassBody', 'ClassDeclaration', 'ClassExpression',
|
||||
'ConditionalExpression', 'ContinueStatement', 'DoWhileStatement',
|
||||
'DebuggerStatement', 'EmptyStatement', 'ExportAllDeclaration',
|
||||
'ExportDefaultDeclaration', 'ExportNamedDeclaration', 'ExportSpecifier',
|
||||
'ExpressionStatement', 'ForStatement', 'ForInStatement',
|
||||
'FunctionDeclaration', 'FunctionExpression', 'Identifier', 'IfStatement',
|
||||
'ImportDeclaration', 'ImportDefaultSpecifier', 'ImportNamespaceSpecifier',
|
||||
'ImportSpecifier', 'Literal', 'LabeledStatement', 'LogicalExpression',
|
||||
'MemberExpression', 'MethodDefinition', 'NewExpression',
|
||||
'ObjectExpression', 'ObjectPattern', 'Program', 'Property', 'RestElement',
|
||||
'ReturnStatement', 'SequenceExpression', 'SpreadElement', 'Super',
|
||||
'SwitchCase', 'SwitchStatement', 'TaggedTemplateExpression',
|
||||
'TemplateElement', 'TemplateLiteral', 'ThisExpression', 'ThrowStatement',
|
||||
'TryStatement', 'UnaryExpression', 'UpdateExpression',
|
||||
'VariableDeclaration', 'VariableDeclarator', 'WhileStatement',
|
||||
'WithStatement'))
|
||||
|
||||
supported_syntax = set(
|
||||
('AssignmentExpression', 'ArrayExpression', 'BlockStatement',
|
||||
'BinaryExpression', 'BreakStatement', 'CallExpression', 'CatchClause',
|
||||
'ConditionalExpression', 'ContinueStatement', 'DoWhileStatement',
|
||||
'DebuggerStatement', 'EmptyStatement', 'ExpressionStatement',
|
||||
'ForStatement', 'ForInStatement', 'FunctionDeclaration',
|
||||
'FunctionExpression', 'Identifier', 'IfStatement', 'Literal',
|
||||
'LabeledStatement', 'LogicalExpression', 'MemberExpression',
|
||||
'MethodDefinition', 'NewExpression', 'ObjectExpression', 'Program',
|
||||
'Property', 'ReturnStatement', 'SequenceExpression', 'SwitchCase',
|
||||
'SwitchStatement', 'ThisExpression', 'ThrowStatement', 'TryStatement',
|
||||
'UnaryExpression', 'UpdateExpression', 'VariableDeclaration',
|
||||
'VariableDeclarator', 'WhileStatement', 'WithStatement'))
|
||||
|
||||
# Error messages should be identical to V8.
|
||||
messages = {
|
||||
'UnexpectedToken': 'Unexpected token %s',
|
||||
'UnexpectedNumber': 'Unexpected number',
|
||||
'UnexpectedString': 'Unexpected string',
|
||||
'UnexpectedIdentifier': 'Unexpected identifier',
|
||||
'UnexpectedReserved': 'Unexpected reserved word',
|
||||
'UnexpectedTemplate': 'Unexpected quasi %s',
|
||||
'UnexpectedEOS': 'Unexpected end of input',
|
||||
'NewlineAfterThrow': 'Illegal newline after throw',
|
||||
'InvalidRegExp': 'Invalid regular expression',
|
||||
'UnterminatedRegExp': 'Invalid regular expression: missing /',
|
||||
'InvalidLHSInAssignment': 'Invalid left-hand side in assignment',
|
||||
'InvalidLHSInForIn': 'Invalid left-hand side in for-in',
|
||||
'MultipleDefaultsInSwitch': 'More than one default clause in switch statement',
|
||||
'NoCatchOrFinally': 'Missing catch or finally after try',
|
||||
'UnknownLabel': 'Undefined label \'%s\'',
|
||||
'Redeclaration': '%s \'%s\' has already been declared',
|
||||
'IllegalContinue': 'Illegal continue statement',
|
||||
'IllegalBreak': 'Illegal break statement',
|
||||
'IllegalReturn': 'Illegal return statement',
|
||||
'StrictModeWith': 'Strict mode code may not include a with statement',
|
||||
'StrictCatchVariable': 'Catch variable may not be eval or arguments in strict mode',
|
||||
'StrictVarName': 'Variable name may not be eval or arguments in strict mode',
|
||||
'StrictParamName': 'Parameter name eval or arguments is not allowed in strict mode',
|
||||
'StrictParamDupe': 'Strict mode function may not have duplicate parameter names',
|
||||
'StrictFunctionName': 'Function name may not be eval or arguments in strict mode',
|
||||
'StrictOctalLiteral': 'Octal literals are not allowed in strict mode.',
|
||||
'StrictDelete': 'Delete of an unqualified identifier in strict mode.',
|
||||
'StrictLHSAssignment': 'Assignment to eval or arguments is not allowed in strict mode',
|
||||
'StrictLHSPostfix': 'Postfix increment/decrement may not have eval or arguments operand in strict mode',
|
||||
'StrictLHSPrefix': 'Prefix increment/decrement may not have eval or arguments operand in strict mode',
|
||||
'StrictReservedWord': 'Use of future reserved word in strict mode',
|
||||
'TemplateOctalLiteral': 'Octal literals are not allowed in template strings.',
|
||||
'ParameterAfterRestParameter': 'Rest parameter must be last formal parameter',
|
||||
'DefaultRestParameter': 'Unexpected token =',
|
||||
'ObjectPatternAsRestParameter': 'Unexpected token {',
|
||||
'DuplicateProtoProperty': 'Duplicate __proto__ fields are not allowed in object literals',
|
||||
'ConstructorSpecialMethod': 'Class constructor may not be an accessor',
|
||||
'DuplicateConstructor': 'A class may only have one constructor',
|
||||
'StaticPrototype': 'Classes may not have static property named prototype',
|
||||
'MissingFromClause': 'Unexpected token',
|
||||
'NoAsAfterImportNamespace': 'Unexpected token',
|
||||
'InvalidModuleSpecifier': 'Unexpected token',
|
||||
'IllegalImportDeclaration': 'Unexpected token',
|
||||
'IllegalExportDeclaration': 'Unexpected token'}
|
||||
'UnexpectedToken':
|
||||
'Unexpected token %s',
|
||||
'UnexpectedNumber':
|
||||
'Unexpected number',
|
||||
'UnexpectedString':
|
||||
'Unexpected string',
|
||||
'UnexpectedIdentifier':
|
||||
'Unexpected identifier',
|
||||
'UnexpectedReserved':
|
||||
'Unexpected reserved word',
|
||||
'UnexpectedTemplate':
|
||||
'Unexpected quasi %s',
|
||||
'UnexpectedEOS':
|
||||
'Unexpected end of input',
|
||||
'NewlineAfterThrow':
|
||||
'Illegal newline after throw',
|
||||
'InvalidRegExp':
|
||||
'Invalid regular expression',
|
||||
'UnterminatedRegExp':
|
||||
'Invalid regular expression: missing /',
|
||||
'InvalidLHSInAssignment':
|
||||
'Invalid left-hand side in assignment',
|
||||
'InvalidLHSInForIn':
|
||||
'Invalid left-hand side in for-in',
|
||||
'MultipleDefaultsInSwitch':
|
||||
'More than one default clause in switch statement',
|
||||
'NoCatchOrFinally':
|
||||
'Missing catch or finally after try',
|
||||
'UnknownLabel':
|
||||
'Undefined label \'%s\'',
|
||||
'Redeclaration':
|
||||
'%s \'%s\' has already been declared',
|
||||
'IllegalContinue':
|
||||
'Illegal continue statement',
|
||||
'IllegalBreak':
|
||||
'Illegal break statement',
|
||||
'IllegalReturn':
|
||||
'Illegal return statement',
|
||||
'StrictModeWith':
|
||||
'Strict mode code may not include a with statement',
|
||||
'StrictCatchVariable':
|
||||
'Catch variable may not be eval or arguments in strict mode',
|
||||
'StrictVarName':
|
||||
'Variable name may not be eval or arguments in strict mode',
|
||||
'StrictParamName':
|
||||
'Parameter name eval or arguments is not allowed in strict mode',
|
||||
'StrictParamDupe':
|
||||
'Strict mode function may not have duplicate parameter names',
|
||||
'StrictFunctionName':
|
||||
'Function name may not be eval or arguments in strict mode',
|
||||
'StrictOctalLiteral':
|
||||
'Octal literals are not allowed in strict mode.',
|
||||
'StrictDelete':
|
||||
'Delete of an unqualified identifier in strict mode.',
|
||||
'StrictLHSAssignment':
|
||||
'Assignment to eval or arguments is not allowed in strict mode',
|
||||
'StrictLHSPostfix':
|
||||
'Postfix increment/decrement may not have eval or arguments operand in strict mode',
|
||||
'StrictLHSPrefix':
|
||||
'Prefix increment/decrement may not have eval or arguments operand in strict mode',
|
||||
'StrictReservedWord':
|
||||
'Use of future reserved word in strict mode',
|
||||
'TemplateOctalLiteral':
|
||||
'Octal literals are not allowed in template strings.',
|
||||
'ParameterAfterRestParameter':
|
||||
'Rest parameter must be last formal parameter',
|
||||
'DefaultRestParameter':
|
||||
'Unexpected token =',
|
||||
'ObjectPatternAsRestParameter':
|
||||
'Unexpected token {',
|
||||
'DuplicateProtoProperty':
|
||||
'Duplicate __proto__ fields are not allowed in object literals',
|
||||
'ConstructorSpecialMethod':
|
||||
'Class constructor may not be an accessor',
|
||||
'DuplicateConstructor':
|
||||
'A class may only have one constructor',
|
||||
'StaticPrototype':
|
||||
'Classes may not have static property named prototype',
|
||||
'MissingFromClause':
|
||||
'Unexpected token',
|
||||
'NoAsAfterImportNamespace':
|
||||
'Unexpected token',
|
||||
'InvalidModuleSpecifier':
|
||||
'Unexpected token',
|
||||
'IllegalImportDeclaration':
|
||||
'Unexpected token',
|
||||
'IllegalExportDeclaration':
|
||||
'Unexpected token'
|
||||
}
|
||||
|
||||
PRECEDENCE = {
|
||||
'||': 1,
|
||||
'&&': 2,
|
||||
'|': 3,
|
||||
'^': 4,
|
||||
'&': 5,
|
||||
'==': 6,
|
||||
'!=': 6,
|
||||
'===': 6,
|
||||
'!==': 6,
|
||||
'<': 7,
|
||||
'>': 7,
|
||||
'<=': 7,
|
||||
'>=': 7,
|
||||
'instanceof': 7,
|
||||
'in': 7,
|
||||
'<<': 8,
|
||||
'>>': 8,
|
||||
'>>>': 8,
|
||||
'+': 9,
|
||||
'-': 9,
|
||||
'*': 11,
|
||||
'/': 11,
|
||||
'%': 11
|
||||
}
|
||||
|
||||
|
||||
class Token:
|
||||
pass
|
||||
|
||||
|
||||
class Syntax:
|
||||
pass
|
||||
|
||||
|
||||
class Messages:
|
||||
pass
|
||||
|
||||
PRECEDENCE = {'||':1,
|
||||
'&&':2,
|
||||
'|':3,
|
||||
'^':4,
|
||||
'&':5,
|
||||
'==':6,
|
||||
'!=':6,
|
||||
'===':6,
|
||||
'!==':6,
|
||||
'<':7,
|
||||
'>':7,
|
||||
'<=':7,
|
||||
'>=':7,
|
||||
'instanceof':7,
|
||||
'in':7,
|
||||
'<<':8,
|
||||
'>>':8,
|
||||
'>>>':8,
|
||||
'+':9,
|
||||
'-':9,
|
||||
'*':11,
|
||||
'/':11,
|
||||
'%':11}
|
||||
|
||||
class Token: pass
|
||||
class Syntax: pass
|
||||
class Messages: pass
|
||||
class PlaceHolders:
|
||||
ArrowParameterPlaceHolder = 'ArrowParameterPlaceHolder'
|
||||
|
||||
for k,v in token.items():
|
||||
|
||||
for k, v in token.items():
|
||||
setattr(Token, k, v)
|
||||
|
||||
for e in syntax:
|
||||
setattr(Syntax, e, e)
|
||||
|
||||
for k,v in messages.items():
|
||||
for k, v in messages.items():
|
||||
setattr(Messages, k, v)
|
||||
|
||||
#http://stackoverflow.com/questions/14245893/efficiently-list-all-characters-in-a-given-unicode-category
|
||||
@@ -220,84 +295,113 @@ CR = u'\u000D'
|
||||
LS = u'\u2028'
|
||||
PS = u'\u2029'
|
||||
|
||||
U_CATEGORIES = defaultdict(list)
|
||||
for c in map(unichr, range(sys.maxunicode + 1)):
|
||||
U_CATEGORIES[unicodedata.category(c)].append(c)
|
||||
UNICODE_LETTER = set(U_CATEGORIES['Lu']+U_CATEGORIES['Ll']+
|
||||
U_CATEGORIES['Lt']+U_CATEGORIES['Lm']+
|
||||
U_CATEGORIES['Lo']+U_CATEGORIES['Nl'])
|
||||
UNICODE_COMBINING_MARK = set(U_CATEGORIES['Mn']+U_CATEGORIES['Mc'])
|
||||
UNICODE_DIGIT = set(U_CATEGORIES['Nd'])
|
||||
UNICODE_CONNECTOR_PUNCTUATION = set(U_CATEGORIES['Pc'])
|
||||
IDENTIFIER_START = UNICODE_LETTER.union(set(('$','_', '\\'))) # and some fucking unicode escape sequence
|
||||
IDENTIFIER_PART = IDENTIFIER_START.union(UNICODE_COMBINING_MARK).union(UNICODE_DIGIT)\
|
||||
.union(UNICODE_CONNECTOR_PUNCTUATION).union(set((ZWJ, ZWNJ)))
|
||||
|
||||
WHITE_SPACE = set((0x20, 0x09, 0x0B, 0x0C, 0xA0, 0x1680,
|
||||
0x180E, 0x2000, 0x2001, 0x2002, 0x2003,
|
||||
0x2004, 0x2005, 0x2006, 0x2007, 0x2008,
|
||||
0x2009, 0x200A, 0x202F, 0x205F, 0x3000,
|
||||
0xFEFF))
|
||||
LETTER_CATEGORIES = set(['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl'])
|
||||
|
||||
COMBINING_MARK_CATEGORIES = set(['Mn', 'Mc'])
|
||||
DIGIT_CATEGORIES = set(['Nd'])
|
||||
CONNECTOR_PUNCTUATION_CATEGORIES = set(['Pc'])
|
||||
IDENTIFIER_START_CATEGORIES = LETTER_CATEGORIES.copy() # and some fucking unicode escape sequence
|
||||
IDENTIFIER_PART_CATEGORIES = IDENTIFIER_START_CATEGORIES.union(COMBINING_MARK_CATEGORIES).union(DIGIT_CATEGORIES)\
|
||||
.union(CONNECTOR_PUNCTUATION_CATEGORIES)
|
||||
|
||||
EXTRA_IDENTIFIER_START_CHARS = set(('$','_', '\\'))
|
||||
EXTRA_IDENTIFIER_PART_CHARS = EXTRA_IDENTIFIER_START_CHARS.union(set((ZWJ, ZWNJ)))
|
||||
|
||||
WHITE_SPACE = set((0x20, 0x09, 0x0B, 0x0C, 0xA0, 0x1680, 0x180E, 0x2000,
|
||||
0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
|
||||
0x2008, 0x2009, 0x200A, 0x202F, 0x205F, 0x3000, 0xFEFF))
|
||||
|
||||
LINE_TERMINATORS = set((0x0A, 0x0D, 0x2028, 0x2029))
|
||||
|
||||
|
||||
def isIdentifierStart(ch):
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in IDENTIFIER_START
|
||||
uch = (ch if isinstance(ch, unicode) else unichr(ch))
|
||||
return unicodedata.category(uch) in IDENTIFIER_START_CATEGORIES or uch in EXTRA_IDENTIFIER_START_CHARS
|
||||
|
||||
|
||||
def isIdentifierPart(ch):
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in IDENTIFIER_PART
|
||||
uch = (ch if isinstance(ch, unicode) else unichr(ch))
|
||||
return unicodedata.category(uch) in IDENTIFIER_PART_CATEGORIES or uch in EXTRA_IDENTIFIER_PART_CHARS
|
||||
|
||||
|
||||
def isValidIdentifier(name):
|
||||
if not name or isKeyword(name):
|
||||
return False
|
||||
check = isIdentifierStart
|
||||
for e in name:
|
||||
if not check(e):
|
||||
return False
|
||||
check = isIdentifierPart
|
||||
return True
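
Both the old set-based character checks and the new category-based ones give the same answers for plain ASCII names, for example:

    assert isValidIdentifier('$jquery')      # '$' is an allowed start character
    assert not isValidIdentifier('2fast')    # digits cannot start an identifier
    assert not isValidIdentifier('if')       # keywords are rejected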
|
||||
|
||||
|
||||
def isWhiteSpace(ch):
|
||||
return (ord(ch) if isinstance(ch, unicode) else ch) in WHITE_SPACE
|
||||
|
||||
|
||||
def isLineTerminator(ch):
|
||||
return (ord(ch) if isinstance(ch, unicode) else ch) in LINE_TERMINATORS
|
||||
return (ord(ch) if isinstance(ch, unicode) else ch) in LINE_TERMINATORS
|
||||
|
||||
|
||||
OCTAL = set(('0', '1', '2', '3', '4', '5', '6', '7'))
|
||||
DEC = set(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'))
|
||||
HEX = set('0123456789abcdefABCDEF')
|
||||
HEX_CONV = dict(('0123456789abcdef'[n],n) for n in xrange(16))
|
||||
for i,e in enumerate('ABCDEF', 10):
|
||||
HEX_CONV = dict(('0123456789abcdef' [n], n) for n in xrange(16))
|
||||
for i, e in enumerate('ABCDEF', 10):
|
||||
HEX_CONV[e] = i
|
||||
|
||||
|
||||
def isDecimalDigit(ch):
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in DEC
|
||||
|
||||
|
||||
def isHexDigit(ch):
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in HEX
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in HEX
|
||||
|
||||
|
||||
def isOctalDigit(ch):
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in OCTAL
|
||||
return (ch if isinstance(ch, unicode) else unichr(ch)) in OCTAL
|
||||
|
||||
|
||||
def isFutureReservedWord(w):
|
||||
return w in ('enum', 'export', 'import', 'super')
|
||||
|
||||
|
||||
RESERVED_WORD = set(('implements', 'interface', 'package', 'private', 'protected', 'public', 'static', 'yield', 'let'))
|
||||
RESERVED_WORD = set(('implements', 'interface', 'package', 'private',
|
||||
'protected', 'public', 'static', 'yield', 'let'))
|
||||
|
||||
|
||||
def isStrictModeReservedWord(w):
|
||||
return w in RESERVED_WORD
|
||||
|
||||
|
||||
def isRestrictedWord(w):
|
||||
return w in ('eval', 'arguments')
|
||||
return w in ('eval', 'arguments')
|
||||
|
||||
|
||||
KEYWORDS = set(
|
||||
('if', 'in', 'do', 'var', 'for', 'new', 'try', 'let', 'this', 'else',
|
||||
'case', 'void', 'with', 'enum', 'while', 'break', 'catch', 'throw',
|
||||
'const', 'yield', 'class', 'super', 'return', 'typeof', 'delete',
|
||||
'switch', 'export', 'import', 'default', 'finally', 'extends', 'function',
|
||||
'continue', 'debugger', 'instanceof', 'pyimport'))
|
||||
|
||||
|
||||
KEYWORDS = set(('if', 'in', 'do', 'var', 'for', 'new', 'try', 'let', 'this', 'else', 'case',
|
||||
'void', 'with', 'enum', 'while', 'break', 'catch', 'throw', 'const', 'yield',
|
||||
'class', 'super', 'return', 'typeof', 'delete', 'switch', 'export', 'import',
|
||||
'default', 'finally', 'extends', 'function', 'continue', 'debugger', 'instanceof', 'pyimport'))
|
||||
def isKeyword(w):
|
||||
# 'const' is specialized as Keyword in V8.
|
||||
# 'yield' and 'let' are for compatibility with SpiderMonkey and ES.next.
|
||||
# Some others are from future reserved words.
|
||||
return w in KEYWORDS
|
||||
# 'const' is specialized as Keyword in V8.
|
||||
# 'yield' and 'let' are for compatibility with SpiderMonkey and ES.next.
|
||||
# Some others are from future reserved words.
|
||||
return w in KEYWORDS
|
||||
|
||||
|
||||
class JsSyntaxError(Exception): pass
|
||||
class JsSyntaxError(Exception):
|
||||
pass
|
||||
|
||||
if __name__=='__main__':
|
||||
|
||||
if __name__ == '__main__':
|
||||
assert isLineTerminator('\n')
|
||||
assert isLineTerminator(0x0A)
|
||||
assert isIdentifierStart('$')
|
||||
assert isIdentifierStart(100)
|
||||
assert isWhiteSpace(' ')
|
||||
assert isWhiteSpace(' ')
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
from .pyjsparserdata import *
|
||||
|
||||
|
||||
class Ecma51NotSupported(Exception):
|
||||
def __init__(self, feature):
|
||||
super(Ecma51NotSupported,
|
||||
self).__init__("%s is not supported by ECMA 5.1." % feature)
|
||||
self.feature = feature
|
||||
|
||||
def get_feature(self):
|
||||
return self.feature
|
||||
|
||||
|
||||
class BaseNode:
|
||||
def finish(self):
|
||||
pass
|
||||
@@ -17,17 +27,6 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishArrowFunctionExpression(self, params, defaults, body, expression):
|
||||
self.type = Syntax.ArrowFunctionExpression
|
||||
self.id = None
|
||||
self.params = params
|
||||
self.defaults = defaults
|
||||
self.body = body
|
||||
self.generator = False
|
||||
self.expression = expression
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishAssignmentExpression(self, operator, left, right):
|
||||
self.type = Syntax.AssignmentExpression
|
||||
self.operator = operator
|
||||
@@ -44,7 +43,8 @@ class BaseNode:
|
||||
return self
|
||||
|
||||
def finishBinaryExpression(self, operator, left, right):
|
||||
self.type = Syntax.LogicalExpression if (operator == '||' or operator == '&&') else Syntax.BinaryExpression
|
||||
self.type = Syntax.LogicalExpression if (
|
||||
operator == '||' or operator == '&&') else Syntax.BinaryExpression
|
||||
self.operator = operator
|
||||
self.left = left
|
||||
self.right = right
|
||||
@@ -77,28 +77,6 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishClassBody(self, body):
|
||||
self.type = Syntax.ClassBody
|
||||
self.body = body
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishClassDeclaration(self, id, superClass, body):
|
||||
self.type = Syntax.ClassDeclaration
|
||||
self.id = id
|
||||
self.superClass = superClass
|
||||
self.body = body
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishClassExpression(self, id, superClass, body):
|
||||
self.type = Syntax.ClassExpression
|
||||
self.id = id
|
||||
self.superClass = superClass
|
||||
self.body = body
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishConditionalExpression(self, test, consequent, alternate):
|
||||
self.type = Syntax.ConditionalExpression
|
||||
self.test = test
|
||||
@@ -200,7 +178,7 @@ class BaseNode:
|
||||
def finishLiteral(self, token):
|
||||
self.type = Syntax.Literal
|
||||
self.value = token['value']
|
||||
self.raw = None # todo fix it?
|
||||
self.raw = token['raw']
|
||||
if token.get('regex'):
|
||||
self.regex = token['regex']
|
||||
self.finish()
|
||||
@@ -264,12 +242,6 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishRestElement(self, argument):
|
||||
self.type = Syntax.RestElement
|
||||
self.argument = argument
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishReturnStatement(self, argument):
|
||||
self.type = Syntax.ReturnStatement
|
||||
self.argument = argument
|
||||
@@ -282,12 +254,6 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishSpreadElement(self, argument):
|
||||
self.type = Syntax.SpreadElement
|
||||
self.argument = argument
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishSwitchCase(self, test, consequent):
|
||||
self.type = Syntax.SwitchCase
|
||||
self.test = test
|
||||
@@ -295,11 +261,6 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishSuper(self, ):
|
||||
self.type = Syntax.Super
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishSwitchStatement(self, discriminant, cases):
|
||||
self.type = Syntax.SwitchStatement
|
||||
self.discriminant = discriminant
|
||||
@@ -307,27 +268,6 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishTaggedTemplateExpression(self, tag, quasi):
|
||||
self.type = Syntax.TaggedTemplateExpression
|
||||
self.tag = tag
|
||||
self.quasi = quasi
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishTemplateElement(self, value, tail):
|
||||
self.type = Syntax.TemplateElement
|
||||
self.value = value
|
||||
self.tail = tail
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishTemplateLiteral(self, quasis, expressions):
|
||||
self.type = Syntax.TemplateLiteral
|
||||
self.quasis = quasis
|
||||
self.expressions = expressions
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishThisExpression(self, ):
|
||||
self.type = Syntax.ThisExpression
|
||||
self.finish()
|
||||
@@ -350,7 +290,8 @@ class BaseNode:
|
||||
return self
|
||||
|
||||
def finishUnaryExpression(self, operator, argument):
|
||||
self.type = Syntax.UpdateExpression if (operator == '++' or operator == '--') else Syntax.UnaryExpression
|
||||
self.type = Syntax.UpdateExpression if (
|
||||
operator == '++' or operator == '--') else Syntax.UnaryExpression
|
||||
self.operator = operator
|
||||
self.argument = argument
|
||||
self.prefix = True
|
||||
@@ -392,58 +333,14 @@ class BaseNode:
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishExportSpecifier(self, local, exported):
|
||||
self.type = Syntax.ExportSpecifier
|
||||
self.exported = exported or local
|
||||
self.local = local
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishImportDefaultSpecifier(self, local):
|
||||
self.type = Syntax.ImportDefaultSpecifier
|
||||
self.local = local
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishImportNamespaceSpecifier(self, local):
|
||||
self.type = Syntax.ImportNamespaceSpecifier
|
||||
self.local = local
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishExportNamedDeclaration(self, declaration, specifiers, src):
|
||||
self.type = Syntax.ExportNamedDeclaration
|
||||
self.declaration = declaration
|
||||
self.specifiers = specifiers
|
||||
self.source = src
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishExportDefaultDeclaration(self, declaration):
|
||||
self.type = Syntax.ExportDefaultDeclaration
|
||||
self.declaration = declaration
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishExportAllDeclaration(self, src):
|
||||
self.type = Syntax.ExportAllDeclaration
|
||||
self.source = src
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishImportSpecifier(self, local, imported):
|
||||
self.type = Syntax.ImportSpecifier
|
||||
self.local = local or imported
|
||||
self.imported = imported
|
||||
self.finish()
|
||||
return self
|
||||
|
||||
def finishImportDeclaration(self, specifiers, src):
|
||||
self.type = Syntax.ImportDeclaration
|
||||
self.specifiers = specifiers
|
||||
self.source = src
|
||||
self.finish()
|
||||
return self
|
||||
def __getattr__(self, item):
|
||||
if item in self.__dict__:
|
||||
return self.__dict__[item]
|
||||
if item.startswith('finish'):
|
||||
feature = item[6:]
|
||||
raise Ecma51NotSupported(feature)
|
||||
else:
|
||||
raise AttributeError(item)
|
||||
|
||||
def __getitem__(self, item):
|
||||
return getattr(self, item)
|
||||
@@ -451,6 +348,10 @@ class BaseNode:
|
||||
def __setitem__(self, key, value):
|
||||
setattr(self, key, value)
|
||||
|
||||
def to_dict(self):
|
||||
return node_to_dict(self)
|
||||
|
||||
|
||||
class Node(BaseNode):
|
||||
pass

34  lib/requests_toolbelt/__init__.py  Normal file
@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
"""
requests-toolbelt
=================

See http://toolbelt.rtfd.org/ for documentation

:copyright: (c) 2014 by Ian Cordasco and Cory Benfield
:license: Apache v2.0, see LICENSE for more details
"""

from .adapters import SSLAdapter, SourceAddressAdapter
from .auth.guess import GuessAuth
from .multipart import (
    MultipartEncoder, MultipartEncoderMonitor, MultipartDecoder,
    ImproperBodyPartContentException, NonMultipartContentTypeException
)
from .streaming_iterator import StreamingIterator
from .utils.user_agent import user_agent

__title__ = 'requests-toolbelt'
__authors__ = 'Ian Cordasco, Cory Benfield'
__license__ = 'Apache v2.0'
__copyright__ = 'Copyright 2014 Ian Cordasco, Cory Benfield'
__version__ = '0.9.1'
__version_info__ = tuple(int(i) for i in __version__.split('.'))

__all__ = [
    'GuessAuth', 'MultipartEncoder', 'MultipartEncoderMonitor',
    'MultipartDecoder', 'SSLAdapter', 'SourceAddressAdapter',
    'StreamingIterator', 'user_agent', 'ImproperBodyPartContentException',
    'NonMultipartContentTypeException', '__title__', '__authors__',
    '__license__', '__copyright__', '__version__', '__version_info__',
]
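
The export most code reaches for here is MultipartEncoder, which streams a multipart/form-data body instead of building it in memory; a typical call looks like this (URL and file name are placeholders):

    import requests
    from requests_toolbelt import MultipartEncoder

    m = MultipartEncoder(fields={
        'comment': 'monthly report',
        'file': ('report.csv', open('report.csv', 'rb'), 'text/csv'),
    })
    requests.post('https://example.com/upload', data=m,
                  headers={'Content-Type': m.content_type})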

324  lib/requests_toolbelt/_compat.py  Normal file
@@ -0,0 +1,324 @@
|
||||
"""Private module full of compatibility hacks.
|
||||
|
||||
Primarily this is for downstream redistributions of requests that unvendor
|
||||
urllib3 without providing a shim.
|
||||
|
||||
.. warning::
|
||||
|
||||
This module is private. If you use it, and something breaks, you were
|
||||
warned
|
||||
"""
|
||||
import sys
|
||||
|
||||
import requests
|
||||
|
||||
try:
|
||||
from requests.packages.urllib3 import fields
|
||||
from requests.packages.urllib3 import filepost
|
||||
from requests.packages.urllib3 import poolmanager
|
||||
except ImportError:
|
||||
from urllib3 import fields
|
||||
from urllib3 import filepost
|
||||
from urllib3 import poolmanager
|
||||
|
||||
try:
|
||||
from requests.packages.urllib3.connection import HTTPConnection
|
||||
from requests.packages.urllib3 import connection
|
||||
except ImportError:
|
||||
try:
|
||||
from urllib3.connection import HTTPConnection
|
||||
from urllib3 import connection
|
||||
except ImportError:
|
||||
HTTPConnection = None
|
||||
connection = None
|
||||
|
||||
|
||||
if requests.__build__ < 0x020300:
|
||||
timeout = None
|
||||
else:
|
||||
try:
|
||||
from requests.packages.urllib3.util import timeout
|
||||
except ImportError:
|
||||
from urllib3.util import timeout
|
||||
|
||||
if requests.__build__ < 0x021000:
|
||||
gaecontrib = None
|
||||
else:
|
||||
try:
|
||||
from requests.packages.urllib3.contrib import appengine as gaecontrib
|
||||
except ImportError:
|
||||
from urllib3.contrib import appengine as gaecontrib
|
||||
|
||||
if requests.__build__ < 0x021200:
|
||||
PyOpenSSLContext = None
|
||||
else:
|
||||
try:
|
||||
from requests.packages.urllib3.contrib.pyopenssl \
|
||||
import PyOpenSSLContext
|
||||
except ImportError:
|
||||
try:
|
||||
from urllib3.contrib.pyopenssl import PyOpenSSLContext
|
||||
except ImportError:
|
||||
PyOpenSSLContext = None
|
||||
|
||||
PY3 = sys.version_info > (3, 0)
|
||||
|
||||
if PY3:
|
||||
from collections.abc import Mapping, MutableMapping
|
||||
import queue
|
||||
from urllib.parse import urlencode, urljoin
|
||||
else:
|
||||
from collections import Mapping, MutableMapping
|
||||
import Queue as queue
|
||||
from urllib import urlencode
|
||||
from urlparse import urljoin
|
||||
|
||||
try:
|
||||
basestring = basestring
|
||||
except NameError:
|
||||
basestring = (str, bytes)
|
||||
|
||||
|
||||
class HTTPHeaderDict(MutableMapping):
|
||||
"""
|
||||
:param headers:
|
||||
An iterable of field-value pairs. Must not contain multiple field names
|
||||
when compared case-insensitively.
|
||||
|
||||
:param kwargs:
|
||||
Additional field-value pairs to pass in to ``dict.update``.
|
||||
|
||||
A ``dict`` like container for storing HTTP Headers.
|
||||
|
||||
Field names are stored and compared case-insensitively in compliance with
|
||||
RFC 7230. Iteration provides the first case-sensitive key seen for each
|
||||
case-insensitive pair.
|
||||
|
||||
Using ``__setitem__`` syntax overwrites fields that compare equal
|
||||
case-insensitively in order to maintain ``dict``'s api. For fields that
|
||||
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
|
||||
in a loop.
|
||||
|
||||
If multiple fields that are equal case-insensitively are passed to the
|
||||
constructor or ``.update``, the behavior is undefined and some will be
|
||||
lost.
|
||||
|
||||
>>> headers = HTTPHeaderDict()
|
||||
>>> headers.add('Set-Cookie', 'foo=bar')
|
||||
>>> headers.add('set-cookie', 'baz=quxx')
|
||||
>>> headers['content-length'] = '7'
|
||||
>>> headers['SET-cookie']
|
||||
'foo=bar, baz=quxx'
|
||||
>>> headers['Content-Length']
|
||||
'7'
|
||||
"""
|
||||
|
||||
def __init__(self, headers=None, **kwargs):
|
||||
super(HTTPHeaderDict, self).__init__()
|
||||
self._container = {}
|
||||
if headers is not None:
|
||||
if isinstance(headers, HTTPHeaderDict):
|
||||
self._copy_from(headers)
|
||||
else:
|
||||
self.extend(headers)
|
||||
if kwargs:
|
||||
self.extend(kwargs)
|
||||
|
||||
def __setitem__(self, key, val):
|
||||
self._container[key.lower()] = (key, val)
|
||||
return self._container[key.lower()]
|
||||
|
||||
def __getitem__(self, key):
|
||||
val = self._container[key.lower()]
|
||||
return ', '.join(val[1:])
|
||||
|
||||
def __delitem__(self, key):
|
||||
del self._container[key.lower()]
|
||||
|
||||
def __contains__(self, key):
|
||||
return key.lower() in self._container
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
|
||||
return False
|
||||
if not isinstance(other, type(self)):
|
||||
other = type(self)(other)
|
||||
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
|
||||
dict((k.lower(), v) for k, v in other.itermerged()))
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
if not PY3: # Python 2
|
||||
iterkeys = MutableMapping.iterkeys
|
||||
itervalues = MutableMapping.itervalues
|
||||
|
||||
__marker = object()
|
||||
|
||||
def __len__(self):
|
||||
return len(self._container)
|
||||
|
||||
def __iter__(self):
|
||||
# Only provide the originally cased names
|
||||
for vals in self._container.values():
|
||||
yield vals[0]
|
||||
|
||||
def pop(self, key, default=__marker):
|
||||
"""D.pop(k[,d]) -> v, remove specified key and return its value.
|
||||
|
||||
If key is not found, d is returned if given, otherwise KeyError is
|
||||
raised.
|
||||
"""
|
||||
# Using the MutableMapping function directly fails due to the private
|
||||
# marker.
|
||||
# Using ordinary dict.pop would expose the internal structures.
|
||||
# So let's reinvent the wheel.
|
||||
try:
|
||||
value = self[key]
|
||||
except KeyError:
|
||||
if default is self.__marker:
|
||||
raise
|
||||
return default
|
||||
else:
|
||||
del self[key]
|
||||
return value
|
||||
|
||||
def discard(self, key):
|
||||
try:
|
||||
del self[key]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def add(self, key, val):
|
||||
"""Adds a (name, value) pair, doesn't overwrite the value if it already
|
||||
exists.
|
||||
|
||||
>>> headers = HTTPHeaderDict(foo='bar')
|
||||
>>> headers.add('Foo', 'baz')
|
||||
>>> headers['foo']
|
||||
'bar, baz'
|
||||
"""
|
||||
key_lower = key.lower()
|
||||
new_vals = key, val
|
||||
# Keep the common case aka no item present as fast as possible
|
||||
vals = self._container.setdefault(key_lower, new_vals)
|
||||
if new_vals is not vals:
|
||||
# new_vals was not inserted, as there was a previous one
|
||||
if isinstance(vals, list):
|
||||
# If already several items got inserted, we have a list
|
||||
vals.append(val)
|
||||
else:
|
||||
# vals should be a tuple then, i.e. only one item so far
|
||||
# Need to convert the tuple to list for further extension
|
||||
self._container[key_lower] = [vals[0], vals[1], val]
|
||||
|
||||
def extend(self, *args, **kwargs):
|
||||
"""Generic import function for any type of header-like object.
|
||||
Adapted version of MutableMapping.update in order to insert items
|
||||
with self.add instead of self.__setitem__
|
||||
"""
|
||||
if len(args) > 1:
|
||||
raise TypeError("extend() takes at most 1 positional "
|
||||
"arguments ({} given)".format(len(args)))
|
||||
other = args[0] if len(args) >= 1 else ()
|
||||
|
||||
if isinstance(other, HTTPHeaderDict):
|
||||
for key, val in other.iteritems():
|
||||
self.add(key, val)
|
||||
elif isinstance(other, Mapping):
|
||||
for key in other:
|
||||
self.add(key, other[key])
|
||||
elif hasattr(other, "keys"):
|
||||
for key in other.keys():
|
||||
self.add(key, other[key])
|
||||
else:
|
||||
for key, value in other:
|
||||
self.add(key, value)
|
||||
|
||||
for key, value in kwargs.items():
|
||||
self.add(key, value)
|
||||
|
||||
def getlist(self, key):
|
||||
"""Returns a list of all the values for the named field. Returns an
|
||||
empty list if the key doesn't exist."""
|
||||
try:
|
||||
vals = self._container[key.lower()]
|
||||
except KeyError:
|
||||
return []
|
||||
else:
|
||||
if isinstance(vals, tuple):
|
||||
return [vals[1]]
|
||||
else:
|
||||
return vals[1:]
|
||||
|
||||
# Backwards compatibility for httplib
|
||||
getheaders = getlist
|
||||
getallmatchingheaders = getlist
|
||||
iget = getlist
|
||||
|
||||
def __repr__(self):
|
||||
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
|
||||
|
||||
def _copy_from(self, other):
|
||||
for key in other:
|
||||
val = other.getlist(key)
|
||||
if isinstance(val, list):
|
||||
# Don't need to convert tuples
|
||||
val = list(val)
|
||||
self._container[key.lower()] = [key] + val
|
||||
|
||||
def copy(self):
|
||||
clone = type(self)()
|
||||
clone._copy_from(self)
|
||||
return clone
|
||||
|
||||
def iteritems(self):
|
||||
"""Iterate over all header lines, including duplicate ones."""
|
||||
for key in self:
|
||||
vals = self._container[key.lower()]
|
||||
for val in vals[1:]:
|
||||
yield vals[0], val
|
||||
|
||||
def itermerged(self):
|
||||
"""Iterate over all headers, merging duplicate ones together."""
|
||||
for key in self:
|
||||
val = self._container[key.lower()]
|
||||
yield val[0], ', '.join(val[1:])
|
||||
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
|
||||
@classmethod
|
||||
def from_httplib(cls, message): # Python 2
|
||||
"""Read headers from a Python 2 httplib message object."""
|
||||
# python2.7 does not expose a proper API for exporting multiheaders
|
||||
# efficiently. This function re-reads raw lines from the message
|
||||
# object and extracts the multiheaders properly.
|
||||
headers = []
|
||||
|
||||
for line in message.headers:
|
||||
if line.startswith((' ', '\t')):
|
||||
key, value = headers[-1]
|
||||
headers[-1] = (key, value + '\r\n' + line.rstrip())
|
||||
continue
|
||||
|
||||
key, value = line.split(':', 1)
|
||||
headers.append((key, value.strip()))
|
||||
|
||||
return cls(headers)
|
||||
|
||||
|
||||
__all__ = (
|
||||
'basestring',
|
||||
'connection',
|
||||
'fields',
|
||||
'filepost',
|
||||
'poolmanager',
|
||||
'timeout',
|
||||
'HTTPHeaderDict',
|
||||
'queue',
|
||||
'urlencode',
|
||||
'gaecontrib',
|
||||
'urljoin',
|
||||
'PyOpenSSLContext',
|
||||
)
|
||||
15 lib/requests_toolbelt/adapters/__init__.py Normal file
@@ -0,0 +1,15 @@
# -*- coding: utf-8 -*-
"""
requests-toolbelt.adapters
==========================

See http://toolbelt.rtfd.org/ for documentation

:copyright: (c) 2014 by Ian Cordasco and Cory Benfield
:license: Apache v2.0, see LICENSE for more details
"""

from .ssl import SSLAdapter
from .source import SourceAddressAdapter

__all__ = ['SSLAdapter', 'SourceAddressAdapter']
206 lib/requests_toolbelt/adapters/appengine.py Normal file
@@ -0,0 +1,206 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""The App Engine Transport Adapter for requests.
|
||||
|
||||
.. versionadded:: 0.6.0
|
||||
|
||||
This requires a version of requests >= 2.10.0 and Python 2.
|
||||
|
||||
There are two ways to use this library:
|
||||
|
||||
#. If you're using requests directly, you can use code like:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> import requests
|
||||
>>> import ssl
|
||||
>>> import requests.packages.urllib3.contrib.appengine as ul_appengine
|
||||
>>> from requests_toolbelt.adapters import appengine
|
||||
>>> s = requests.Session()
|
||||
>>> if ul_appengine.is_appengine_sandbox():
|
||||
... s.mount('http://', appengine.AppEngineAdapter())
|
||||
... s.mount('https://', appengine.AppEngineAdapter())
|
||||
|
||||
#. If you depend on external libraries which use requests, you can use code
|
||||
like:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> from requests_toolbelt.adapters import appengine
|
||||
>>> appengine.monkeypatch()
|
||||
|
||||
which will ensure all requests.Session objects use AppEngineAdapter properly.
|
||||
|
||||
You are also able to :ref:`disable certificate validation <insecure_appengine>`
|
||||
when monkey-patching.
|
||||
"""
|
||||
import requests
|
||||
import warnings
|
||||
from requests import adapters
|
||||
from requests import sessions
|
||||
|
||||
from .. import exceptions as exc
|
||||
from .._compat import gaecontrib
|
||||
from .._compat import timeout
|
||||
|
||||
|
||||
class AppEngineMROHack(adapters.HTTPAdapter):
|
||||
"""Resolves infinite recursion when monkeypatching.
|
||||
|
||||
This works by injecting itself as the base class of both the
|
||||
:class:`AppEngineAdapter` and Requests' default HTTPAdapter, which needs to
|
||||
be done because default HTTPAdapter's MRO is recompiled when we
|
||||
monkeypatch, at which point this class becomes HTTPAdapter's base class.
|
||||
In addition, we use an instantiation flag to avoid infinite recursion.
|
||||
"""
|
||||
_initialized = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
if not self._initialized:
|
||||
self._initialized = True
|
||||
super(AppEngineMROHack, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class AppEngineAdapter(AppEngineMROHack, adapters.HTTPAdapter):
|
||||
"""The transport adapter for Requests to use urllib3's GAE support.
|
||||
|
||||
Implements Requests's HTTPAdapter API.
|
||||
|
||||
When deploying to Google's App Engine service, some of Requests'
|
||||
functionality is broken. There is underlying support for GAE in urllib3.
|
||||
This functionality, however, is opt-in and needs to be enabled explicitly
|
||||
for Requests to be able to use it.
|
||||
"""
|
||||
|
||||
__attrs__ = adapters.HTTPAdapter.__attrs__ + ['_validate_certificate']
|
||||
|
||||
def __init__(self, validate_certificate=True, *args, **kwargs):
|
||||
_check_version()
|
||||
self._validate_certificate = validate_certificate
|
||||
super(AppEngineAdapter, self).__init__(*args, **kwargs)
|
||||
|
||||
def init_poolmanager(self, connections, maxsize, block=False):
|
||||
self.poolmanager = _AppEnginePoolManager(self._validate_certificate)
|
||||
|
||||
|
||||
class InsecureAppEngineAdapter(AppEngineAdapter):
|
||||
"""An always-insecure GAE adapter for Requests.
|
||||
|
||||
This is a variant of the the transport adapter for Requests to use
|
||||
urllib3's GAE support that does not validate certificates. Use with
|
||||
caution!
|
||||
|
||||
.. note::
|
||||
The ``validate_certificate`` keyword argument will not be honored here
|
||||
and is not part of the signature because we always force it to
|
||||
``False``.
|
||||
|
||||
See :class:`AppEngineAdapter` for further details.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
if kwargs.pop("validate_certificate", False):
|
||||
warnings.warn("Certificate validation cannot be specified on the "
|
||||
"InsecureAppEngineAdapter, but was present. This "
|
||||
"will be ignored and certificate validation will "
|
||||
"remain off.", exc.IgnoringGAECertificateValidation)
|
||||
|
||||
super(InsecureAppEngineAdapter, self).__init__(
|
||||
validate_certificate=False, *args, **kwargs)
|
||||
|
||||
|
||||
class _AppEnginePoolManager(object):
|
||||
"""Implements urllib3's PoolManager API expected by requests.
|
||||
|
||||
While a real PoolManager map hostnames to reusable Connections,
|
||||
AppEngine has no concept of a reusable connection to a host.
|
||||
So instead, this class constructs a small Connection per request,
|
||||
that is returned to the Adapter and used to access the URL.
|
||||
"""
|
||||
|
||||
def __init__(self, validate_certificate=True):
|
||||
self.appengine_manager = gaecontrib.AppEngineManager(
|
||||
validate_certificate=validate_certificate)
|
||||
|
||||
def connection_from_url(self, url):
|
||||
return _AppEngineConnection(self.appengine_manager, url)
|
||||
|
||||
def clear(self):
|
||||
pass
|
||||
|
||||
|
||||
class _AppEngineConnection(object):
|
||||
"""Implements urllib3's HTTPConnectionPool API's urlopen().
|
||||
|
||||
This Connection's urlopen() is called with a host-relative path,
|
||||
so in order to properly support opening the URL, we need to store
|
||||
the full URL when this Connection is constructed from the PoolManager.
|
||||
|
||||
This code wraps AppEngineManager.urlopen(), which exposes a different
|
||||
API than in the original urllib3 urlopen(), and thus needs this adapter.
|
||||
"""
|
||||
|
||||
def __init__(self, appengine_manager, url):
|
||||
self.appengine_manager = appengine_manager
|
||||
self.url = url
|
||||
|
||||
def urlopen(self, method, url, body=None, headers=None, retries=None,
|
||||
redirect=True, assert_same_host=True,
|
||||
timeout=timeout.Timeout.DEFAULT_TIMEOUT,
|
||||
pool_timeout=None, release_conn=None, **response_kw):
|
||||
# This function's url argument is a host-relative URL,
|
||||
# but the AppEngineManager expects an absolute URL.
|
||||
# So we saved out the self.url when the AppEngineConnection
|
||||
# was constructed, which we then can use down below instead.
|
||||
|
||||
# We once tried to verify our assumptions here, but sometimes the
|
||||
# passed-in URL differs on url fragments, or "http://a.com" vs "/".
|
||||
|
||||
# urllib3's App Engine adapter only uses Timeout.total, not read or
|
||||
# connect.
|
||||
if not timeout.total:
|
||||
timeout.total = timeout._read or timeout._connect
|
||||
|
||||
# Jump through the hoops necessary to call AppEngineManager's API.
|
||||
return self.appengine_manager.urlopen(
|
||||
method,
|
||||
self.url,
|
||||
body=body,
|
||||
headers=headers,
|
||||
retries=retries,
|
||||
redirect=redirect,
|
||||
timeout=timeout,
|
||||
**response_kw)
|
||||
|
||||
|
||||
def monkeypatch(validate_certificate=True):
|
||||
"""Sets up all Sessions to use AppEngineAdapter by default.
|
||||
|
||||
If you don't want to deal with configuring your own Sessions,
|
||||
or if you use libraries that use requests directly (ie requests.post),
|
||||
then you may prefer to monkeypatch and auto-configure all Sessions.
|
||||
|
||||
.. warning: :
|
||||
|
||||
If ``validate_certificate`` is ``False``, certification validation will
|
||||
effectively be disabled for all requests.
|
||||
"""
|
||||
_check_version()
|
||||
# HACK: We should consider modifying urllib3 to support this cleanly,
|
||||
# so that we can set a module-level variable in the sessions module,
|
||||
# instead of overriding an imported HTTPAdapter as is done here.
|
||||
adapter = AppEngineAdapter
|
||||
if not validate_certificate:
|
||||
adapter = InsecureAppEngineAdapter
|
||||
|
||||
sessions.HTTPAdapter = adapter
|
||||
adapters.HTTPAdapter = adapter
|
||||
|
||||
|
||||
def _check_version():
|
||||
if gaecontrib is None:
|
||||
raise exc.VersionMismatchError(
|
||||
"The toolbelt requires at least Requests 2.10.0 to be "
|
||||
"installed. Version {0} was found instead.".format(
|
||||
requests.__version__
|
||||
)
|
||||
)
|
||||
48 lib/requests_toolbelt/adapters/fingerprint.py Normal file
@@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
"""Submodule containing the implementation for the FingerprintAdapter.

This file contains an implementation of a Transport Adapter that validates
the fingerprints of SSL certificates presented upon connection.
"""
from requests.adapters import HTTPAdapter

from .._compat import poolmanager


class FingerprintAdapter(HTTPAdapter):
    """
    A HTTPS Adapter for Python Requests that verifies certificate fingerprints,
    instead of certificate hostnames.

    Example usage:

    .. code-block:: python

        import requests
        import ssl
        from requests_toolbelt.adapters.fingerprint import FingerprintAdapter

        twitter_fingerprint = '...'
        s = requests.Session()
        s.mount(
            'https://twitter.com',
            FingerprintAdapter(twitter_fingerprint)
        )

    The fingerprint should be provided as a hexadecimal string, optionally
    containing colons.
    """

    __attrs__ = HTTPAdapter.__attrs__ + ['fingerprint']

    def __init__(self, fingerprint, **kwargs):
        self.fingerprint = fingerprint

        super(FingerprintAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        self.poolmanager = poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            assert_fingerprint=self.fingerprint)
43 lib/requests_toolbelt/adapters/host_header_ssl.py Normal file
@@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
"""
requests_toolbelt.adapters.host_header_ssl
==========================================

This file contains an implementation of the HostHeaderSSLAdapter.
"""

from requests.adapters import HTTPAdapter


class HostHeaderSSLAdapter(HTTPAdapter):
    """
    A HTTPS Adapter for Python Requests that sets the hostname for certificate
    verification based on the Host header.

    This allows requesting the IP address directly via HTTPS without getting
    a "hostname doesn't match" exception.

    Example usage:

    >>> s.mount('https://', HostHeaderSSLAdapter())
    >>> s.get("https://93.184.216.34", headers={"Host": "example.org"})

    """

    def send(self, request, **kwargs):
        # HTTP headers are case-insensitive (RFC 7230)
        host_header = None
        for header in request.headers:
            if header.lower() == "host":
                host_header = request.headers[header]
                break

        connection_pool_kwargs = self.poolmanager.connection_pool_kw

        if host_header:
            connection_pool_kwargs["assert_hostname"] = host_header
        elif "assert_hostname" in connection_pool_kwargs:
            # an assert_hostname from a previous request may have been left
            connection_pool_kwargs.pop("assert_hostname", None)

        return super(HostHeaderSSLAdapter, self).send(request, **kwargs)
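The docstring above shows the mount and request calls in isolation; a short, complete sketch of the same idea, reusing the IP/Host pair from the docstring example purely as an illustration:

import requests
from requests_toolbelt.adapters.host_header_ssl import HostHeaderSSLAdapter

session = requests.Session()
session.mount('https://', HostHeaderSSLAdapter())
# The server certificate is verified against the Host header value,
# not against the IP address that appears in the URL.
response = session.get('https://93.184.216.34', headers={'Host': 'example.org'})
print(response.status_code)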
129 lib/requests_toolbelt/adapters/socket_options.py Normal file
@@ -0,0 +1,129 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""The implementation of the SocketOptionsAdapter."""
|
||||
import socket
|
||||
import warnings
|
||||
import sys
|
||||
|
||||
import requests
|
||||
from requests import adapters
|
||||
|
||||
from .._compat import connection
|
||||
from .._compat import poolmanager
|
||||
from .. import exceptions as exc
|
||||
|
||||
|
||||
class SocketOptionsAdapter(adapters.HTTPAdapter):
|
||||
"""An adapter for requests that allows users to specify socket options.
|
||||
|
||||
Since version 2.4.0 of requests, it is possible to specify a custom list
|
||||
of socket options that need to be set before establishing the connection.
|
||||
|
||||
Example usage::
|
||||
|
||||
>>> import socket
|
||||
>>> import requests
|
||||
>>> from requests_toolbelt.adapters import socket_options
|
||||
>>> s = requests.Session()
|
||||
>>> opts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)]
|
||||
>>> adapter = socket_options.SocketOptionsAdapter(socket_options=opts)
|
||||
>>> s.mount('http://', adapter)
|
||||
|
||||
You can also take advantage of the list of default options on this class
|
||||
to keep using the original options in addition to your custom options. In
|
||||
that case, ``opts`` might look like::
|
||||
|
||||
>>> opts = socket_options.SocketOptionsAdapter.default_options + opts
|
||||
|
||||
"""
|
||||
|
||||
if connection is not None:
|
||||
default_options = getattr(
|
||||
connection.HTTPConnection,
|
||||
'default_socket_options',
|
||||
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
|
||||
)
|
||||
else:
|
||||
default_options = []
|
||||
warnings.warn(exc.RequestsVersionTooOld,
|
||||
"This version of Requests is only compatible with a "
|
||||
"version of urllib3 which is too old to support "
|
||||
"setting options on a socket. This adapter is "
|
||||
"functionally useless.")
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.socket_options = kwargs.pop('socket_options',
|
||||
self.default_options)
|
||||
|
||||
super(SocketOptionsAdapter, self).__init__(**kwargs)
|
||||
|
||||
def init_poolmanager(self, connections, maxsize, block=False):
|
||||
if requests.__build__ >= 0x020400:
|
||||
# NOTE(Ian): Perhaps we should raise a warning
|
||||
self.poolmanager = poolmanager.PoolManager(
|
||||
num_pools=connections,
|
||||
maxsize=maxsize,
|
||||
block=block,
|
||||
socket_options=self.socket_options
|
||||
)
|
||||
else:
|
||||
super(SocketOptionsAdapter, self).init_poolmanager(
|
||||
connections, maxsize, block
|
||||
)
|
||||
|
||||
|
||||
class TCPKeepAliveAdapter(SocketOptionsAdapter):
|
||||
"""An adapter for requests that turns on TCP Keep-Alive by default.
|
||||
|
||||
The adapter sets 4 socket options:
|
||||
|
||||
- ``SOL_SOCKET`` ``SO_KEEPALIVE`` - This turns on TCP Keep-Alive
|
||||
- ``IPPROTO_TCP`` ``TCP_KEEPINTVL`` 20 - Sets the keep alive interval
|
||||
- ``IPPROTO_TCP`` ``TCP_KEEPCNT`` 5 - Sets the number of keep alive probes
|
||||
- ``IPPROTO_TCP`` ``TCP_KEEPIDLE`` 60 - Sets the keep alive time if the
|
||||
socket library has the ``TCP_KEEPIDLE`` constant
|
||||
|
||||
The latter three can be overridden by keyword arguments (respectively):
|
||||
|
||||
- ``idle``
|
||||
- ``interval``
|
||||
- ``count``
|
||||
|
||||
You can use this adapter like so::
|
||||
|
||||
>>> from requests_toolbelt.adapters import socket_options
|
||||
>>> tcp = socket_options.TCPKeepAliveAdapter(idle=120, interval=10)
|
||||
>>> s = requests.Session()
|
||||
>>> s.mount('http://', tcp)
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
socket_options = kwargs.pop('socket_options',
|
||||
SocketOptionsAdapter.default_options)
|
||||
idle = kwargs.pop('idle', 60)
|
||||
interval = kwargs.pop('interval', 20)
|
||||
count = kwargs.pop('count', 5)
|
||||
socket_options = socket_options + [
|
||||
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
]
|
||||
|
||||
# NOTE(Ian): OSX does not have these constants defined, so we
|
||||
# set them conditionally.
|
||||
if getattr(socket, 'TCP_KEEPINTVL', None) is not None:
|
||||
socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
|
||||
interval)]
|
||||
elif sys.platform == 'darwin':
|
||||
# On OSX, TCP_KEEPALIVE from netinet/tcp.h is not exported
|
||||
# by python's socket module
|
||||
TCP_KEEPALIVE = getattr(socket, 'TCP_KEEPALIVE', 0x10)
|
||||
socket_options += [(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval)]
|
||||
|
||||
if getattr(socket, 'TCP_KEEPCNT', None) is not None:
|
||||
socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, count)]
|
||||
|
||||
if getattr(socket, 'TCP_KEEPIDLE', None) is not None:
|
||||
socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)]
|
||||
|
||||
super(TCPKeepAliveAdapter, self).__init__(
|
||||
socket_options=socket_options, **kwargs
|
||||
)
|
||||
67 lib/requests_toolbelt/adapters/source.py Normal file
@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""
requests_toolbelt.source_adapter
================================

This file contains an implementation of the SourceAddressAdapter originally
demonstrated on the Requests GitHub page.
"""
from requests.adapters import HTTPAdapter

from .._compat import poolmanager, basestring


class SourceAddressAdapter(HTTPAdapter):
    """
    A Source Address Adapter for Python Requests that enables you to choose the
    local address to bind to. This allows you to send your HTTP requests from a
    specific interface and IP address.

    Two address formats are accepted. The first is a string: this will set the
    local IP address to the address given in the string, and will also choose a
    semi-random high port for the local port number.

    The second is a two-tuple of the form (ip address, port): for example,
    ``('10.10.10.10', 8999)``. This will set the local IP address to the first
    element, and the local port to the second element. If ``0`` is used as the
    port number, a semi-random high port will be selected.

    .. warning:: Setting an explicit local port can have negative interactions
                 with connection-pooling in Requests: in particular, it risks
                 the possibility of getting "Address in use" errors. The
                 string-only argument is generally preferred to the tuple-form.

    Example usage:

    .. code-block:: python

        import requests
        from requests_toolbelt.adapters.source import SourceAddressAdapter

        s = requests.Session()
        s.mount('http://', SourceAddressAdapter('10.10.10.10'))
        s.mount('https://', SourceAddressAdapter(('10.10.10.10', 8999)))
    """
    def __init__(self, source_address, **kwargs):
        if isinstance(source_address, basestring):
            self.source_address = (source_address, 0)
        elif isinstance(source_address, tuple):
            self.source_address = source_address
        else:
            raise TypeError(
                "source_address must be IP address string or (ip, port) tuple"
            )

        super(SourceAddressAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        self.poolmanager = poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            source_address=self.source_address)

    def proxy_manager_for(self, *args, **kwargs):
        kwargs['source_address'] = self.source_address
        return super(SourceAddressAdapter, self).proxy_manager_for(
            *args, **kwargs)
66 lib/requests_toolbelt/adapters/ssl.py Normal file
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""

requests_toolbelt.ssl_adapter
=============================

This file contains an implementation of the SSLAdapter originally demonstrated
in this blog post:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/

"""
import requests

from requests.adapters import HTTPAdapter

from .._compat import poolmanager


class SSLAdapter(HTTPAdapter):
    """
    A HTTPS Adapter for Python Requests that allows the choice of the SSL/TLS
    version negotiated by Requests. This can be used either to enforce the
    choice of high-security TLS versions (where supported), or to work around
    misbehaving servers that fail to correctly negotiate the default TLS
    version being offered.

    Example usage:

    >>> import requests
    >>> import ssl
    >>> from requests_toolbelt import SSLAdapter
    >>> s = requests.Session()
    >>> s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))

    You can replace the chosen protocol with any that are available in the
    default Python SSL module. All subsequent requests that match the adapter
    prefix will use the chosen SSL version instead of the default.

    This adapter will also attempt to change the SSL/TLS version negotiated by
    Requests when using a proxy. However, this may not always be possible:
    prior to Requests v2.4.0 the adapter did not have access to the proxy setup
    code. In earlier versions of Requests, this adapter will not function
    properly when used with proxies.
    """

    __attrs__ = HTTPAdapter.__attrs__ + ['ssl_version']

    def __init__(self, ssl_version=None, **kwargs):
        self.ssl_version = ssl_version

        super(SSLAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        self.poolmanager = poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            ssl_version=self.ssl_version)

    if requests.__build__ >= 0x020400:
        # Earlier versions of requests either don't have this method or, worse,
        # don't allow passing arbitrary keyword arguments. As a result, only
        # conditionally define this method.
        def proxy_manager_for(self, *args, **kwargs):
            kwargs['ssl_version'] = self.ssl_version
            return super(SSLAdapter, self).proxy_manager_for(*args, **kwargs)
178 lib/requests_toolbelt/adapters/x509.py Normal file
@@ -0,0 +1,178 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""A X509Adapter for use with the requests library.
|
||||
|
||||
This file contains an implementation of the X509Adapter that will
|
||||
allow users to authenticate a request using an arbitrary
|
||||
X.509 certificate without needing to convert it to a .pem file
|
||||
|
||||
"""
|
||||
|
||||
from OpenSSL.crypto import PKey, X509
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.primitives.serialization import (load_pem_private_key,
|
||||
load_der_private_key)
|
||||
from cryptography.hazmat.primitives.serialization import Encoding
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
|
||||
from datetime import datetime
|
||||
from requests.adapters import HTTPAdapter
|
||||
import requests
|
||||
|
||||
from .._compat import PyOpenSSLContext
|
||||
from .. import exceptions as exc
|
||||
|
||||
"""
|
||||
importing the protocol constants from _ssl instead of ssl because only the
|
||||
constants are needed and to handle issues caused by importing from ssl on
|
||||
the 2.7.x line.
|
||||
"""
|
||||
try:
|
||||
from _ssl import PROTOCOL_TLS as PROTOCOL
|
||||
except ImportError:
|
||||
from _ssl import PROTOCOL_SSLv23 as PROTOCOL
|
||||
|
||||
|
||||
class X509Adapter(HTTPAdapter):
|
||||
r"""Adapter for use with X.509 certificates.
|
||||
|
||||
Provides an interface for Requests sessions to contact HTTPS urls and
|
||||
authenticate with an X.509 cert by implementing the Transport Adapter
|
||||
interface. This class will need to be manually instantiated and mounted
|
||||
to the session
|
||||
|
||||
:param pool_connections: The number of urllib3 connection pools to
|
||||
cache.
|
||||
:param pool_maxsize: The maximum number of connections to save in the
|
||||
pool.
|
||||
:param max_retries: The maximum number of retries each connection
|
||||
should attempt. Note, this applies only to failed DNS lookups,
|
||||
socket connections and connection timeouts, never to requests where
|
||||
data has made it to the server. By default, Requests does not retry
|
||||
failed connections. If you need granular control over the
|
||||
conditions under which we retry a request, import urllib3's
|
||||
``Retry`` class and pass that instead.
|
||||
:param pool_block: Whether the connection pool should block for
|
||||
connections.
|
||||
|
||||
:param bytes cert_bytes:
|
||||
bytes object containing contents of a cryptography.x509Certificate
|
||||
object using the encoding specified by the ``encoding`` parameter.
|
||||
:param bytes pk_bytes:
|
||||
bytes object containing contents of a object that implements
|
||||
``cryptography.hazmat.primitives.serialization.PrivateFormat``
|
||||
using the encoding specified by the ``encoding`` parameter.
|
||||
:param password:
|
||||
string or utf8 encoded bytes containing the passphrase used for the
|
||||
private key. None if unencrypted. Defaults to None.
|
||||
:param encoding:
|
||||
Enumeration detailing the encoding method used on the ``cert_bytes``
|
||||
parameter. Can be either PEM or DER. Defaults to PEM.
|
||||
:type encoding:
|
||||
:class: `cryptography.hazmat.primitives.serialization.Encoding`
|
||||
|
||||
Usage::
|
||||
|
||||
>>> import requests
|
||||
>>> from requests_toolbelt.adapters.x509 import X509Adapter
|
||||
>>> s = requests.Session()
|
||||
>>> a = X509Adapter(max_retries=3,
|
||||
cert_bytes=b'...', pk_bytes=b'...', encoding='...'
|
||||
>>> s.mount('https://', a)
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self._check_version()
|
||||
cert_bytes = kwargs.pop('cert_bytes', None)
|
||||
pk_bytes = kwargs.pop('pk_bytes', None)
|
||||
password = kwargs.pop('password', None)
|
||||
encoding = kwargs.pop('encoding', Encoding.PEM)
|
||||
|
||||
password_bytes = None
|
||||
|
||||
if cert_bytes is None or not isinstance(cert_bytes, bytes):
|
||||
raise ValueError('Invalid cert content provided. '
|
||||
'You must provide an X.509 cert '
|
||||
'formatted as a byte array.')
|
||||
if pk_bytes is None or not isinstance(pk_bytes, bytes):
|
||||
raise ValueError('Invalid private key content provided. '
|
||||
'You must provide a private key '
|
||||
'formatted as a byte array.')
|
||||
|
||||
if isinstance(password, bytes):
|
||||
password_bytes = password
|
||||
elif password:
|
||||
password_bytes = password.encode('utf8')
|
||||
|
||||
self.ssl_context = create_ssl_context(cert_bytes, pk_bytes,
|
||||
password_bytes, encoding)
|
||||
|
||||
super(X509Adapter, self).__init__(*args, **kwargs)
|
||||
|
||||
def init_poolmanager(self, *args, **kwargs):
|
||||
if self.ssl_context:
|
||||
kwargs['ssl_context'] = self.ssl_context
|
||||
return super(X509Adapter, self).init_poolmanager(*args, **kwargs)
|
||||
|
||||
def proxy_manager_for(self, *args, **kwargs):
|
||||
if self.ssl_context:
|
||||
kwargs['ssl_context'] = self.ssl_context
|
||||
return super(X509Adapter, self).proxy_manager_for(*args, **kwargs)
|
||||
|
||||
def _check_version(self):
|
||||
if PyOpenSSLContext is None:
|
||||
raise exc.VersionMismatchError(
|
||||
"The X509Adapter requires at least Requests 2.12.0 to be "
|
||||
"installed. Version {0} was found instead.".format(
|
||||
requests.__version__
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def check_cert_dates(cert):
|
||||
"""Verify that the supplied client cert is not invalid."""
|
||||
|
||||
now = datetime.utcnow()
|
||||
if cert.not_valid_after < now or cert.not_valid_before > now:
|
||||
raise ValueError('Client certificate expired: Not After: '
|
||||
'{0:%Y-%m-%d %H:%M:%SZ} '
|
||||
'Not Before: {1:%Y-%m-%d %H:%M:%SZ}'
|
||||
.format(cert.not_valid_after, cert.not_valid_before))
|
||||
|
||||
|
||||
def create_ssl_context(cert_byes, pk_bytes, password=None,
|
||||
encoding=Encoding.PEM):
|
||||
"""Create an SSL Context with the supplied cert/password.
|
||||
|
||||
:param cert_bytes array of bytes containing the cert encoded
|
||||
using the method supplied in the ``encoding`` parameter
|
||||
:param pk_bytes array of bytes containing the private key encoded
|
||||
using the method supplied in the ``encoding`` parameter
|
||||
:param password array of bytes containing the passphrase to be used
|
||||
with the supplied private key. None if unencrypted.
|
||||
Defaults to None.
|
||||
:param encoding ``cryptography.hazmat.primitives.serialization.Encoding``
|
||||
details the encoding method used on the ``cert_bytes`` and
|
||||
``pk_bytes`` parameters. Can be either PEM or DER.
|
||||
Defaults to PEM.
|
||||
"""
|
||||
backend = default_backend()
|
||||
|
||||
cert = None
|
||||
key = None
|
||||
if encoding == Encoding.PEM:
|
||||
cert = x509.load_pem_x509_certificate(cert_byes, backend)
|
||||
key = load_pem_private_key(pk_bytes, password, backend)
|
||||
elif encoding == Encoding.DER:
|
||||
cert = x509.load_der_x509_certificate(cert_byes, backend)
|
||||
key = load_der_private_key(pk_bytes, password, backend)
|
||||
else:
|
||||
raise ValueError('Invalid encoding provided: Must be PEM or DER')
|
||||
|
||||
if not (cert and key):
|
||||
raise ValueError('Cert and key could not be parsed from '
|
||||
'provided data')
|
||||
check_cert_dates(cert)
|
||||
ssl_context = PyOpenSSLContext(PROTOCOL)
|
||||
ssl_context._ctx.use_certificate(X509.from_cryptography(cert))
|
||||
ssl_context._ctx.use_privatekey(PKey.from_cryptography_key(key))
|
||||
return ssl_context
|
||||
0 lib/requests_toolbelt/auth/__init__.py Normal file
29 lib/requests_toolbelt/auth/_digest_auth_compat.py Normal file
@@ -0,0 +1,29 @@
"""Provide a compatibility layer for requests.auth.HTTPDigestAuth."""
import requests


class _ThreadingDescriptor(object):
    def __init__(self, prop, default):
        self.prop = prop
        self.default = default

    def __get__(self, obj, objtype=None):
        return getattr(obj._thread_local, self.prop, self.default)

    def __set__(self, obj, value):
        setattr(obj._thread_local, self.prop, value)


class _HTTPDigestAuth(requests.auth.HTTPDigestAuth):
    init = _ThreadingDescriptor('init', True)
    last_nonce = _ThreadingDescriptor('last_nonce', '')
    nonce_count = _ThreadingDescriptor('nonce_count', 0)
    chal = _ThreadingDescriptor('chal', {})
    pos = _ThreadingDescriptor('pos', None)
    num_401_calls = _ThreadingDescriptor('num_401_calls', 1)


if requests.__build__ < 0x020800:
    HTTPDigestAuth = requests.auth.HTTPDigestAuth
else:
    HTTPDigestAuth = _HTTPDigestAuth
146 lib/requests_toolbelt/auth/guess.py Normal file
@@ -0,0 +1,146 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""The module containing the code for GuessAuth."""
|
||||
from requests import auth
|
||||
from requests import cookies
|
||||
|
||||
from . import _digest_auth_compat as auth_compat, http_proxy_digest
|
||||
|
||||
|
||||
class GuessAuth(auth.AuthBase):
|
||||
"""Guesses the auth type by the WWW-Authentication header."""
|
||||
def __init__(self, username, password):
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.auth = None
|
||||
self.pos = None
|
||||
|
||||
def _handle_basic_auth_401(self, r, kwargs):
|
||||
if self.pos is not None:
|
||||
r.request.body.seek(self.pos)
|
||||
|
||||
# Consume content and release the original connection
|
||||
# to allow our new request to reuse the same one.
|
||||
r.content
|
||||
r.raw.release_conn()
|
||||
prep = r.request.copy()
|
||||
if not hasattr(prep, '_cookies'):
|
||||
prep._cookies = cookies.RequestsCookieJar()
|
||||
cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
|
||||
prep.prepare_cookies(prep._cookies)
|
||||
|
||||
self.auth = auth.HTTPBasicAuth(self.username, self.password)
|
||||
prep = self.auth(prep)
|
||||
_r = r.connection.send(prep, **kwargs)
|
||||
_r.history.append(r)
|
||||
_r.request = prep
|
||||
|
||||
return _r
|
||||
|
||||
def _handle_digest_auth_401(self, r, kwargs):
|
||||
self.auth = auth_compat.HTTPDigestAuth(self.username, self.password)
|
||||
try:
|
||||
self.auth.init_per_thread_state()
|
||||
except AttributeError:
|
||||
# If we're not on requests 2.8.0+ this method does not exist and
|
||||
# is not relevant.
|
||||
pass
|
||||
|
||||
# Check that the attr exists because much older versions of requests
|
||||
# set this attribute lazily. For example:
|
||||
# https://github.com/kennethreitz/requests/blob/33735480f77891754304e7f13e3cdf83aaaa76aa/requests/auth.py#L59
|
||||
if (hasattr(self.auth, 'num_401_calls') and
|
||||
self.auth.num_401_calls is None):
|
||||
self.auth.num_401_calls = 1
|
||||
# Digest auth would resend the request by itself. We can take a
|
||||
# shortcut here.
|
||||
return self.auth.handle_401(r, **kwargs)
|
||||
|
||||
def handle_401(self, r, **kwargs):
|
||||
"""Resends a request with auth headers, if needed."""
|
||||
|
||||
www_authenticate = r.headers.get('www-authenticate', '').lower()
|
||||
|
||||
if 'basic' in www_authenticate:
|
||||
return self._handle_basic_auth_401(r, kwargs)
|
||||
|
||||
if 'digest' in www_authenticate:
|
||||
return self._handle_digest_auth_401(r, kwargs)
|
||||
|
||||
def __call__(self, request):
|
||||
if self.auth is not None:
|
||||
return self.auth(request)
|
||||
|
||||
try:
|
||||
self.pos = request.body.tell()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
request.register_hook('response', self.handle_401)
|
||||
return request
|
||||
|
||||
|
||||
class GuessProxyAuth(GuessAuth):
|
||||
"""
|
||||
Guesses the auth type by WWW-Authentication and Proxy-Authentication
|
||||
headers
|
||||
"""
|
||||
def __init__(self, username=None, password=None,
|
||||
proxy_username=None, proxy_password=None):
|
||||
super(GuessProxyAuth, self).__init__(username, password)
|
||||
self.proxy_username = proxy_username
|
||||
self.proxy_password = proxy_password
|
||||
self.proxy_auth = None
|
||||
|
||||
def _handle_basic_auth_407(self, r, kwargs):
|
||||
if self.pos is not None:
|
||||
r.request.body.seek(self.pos)
|
||||
|
||||
r.content
|
||||
r.raw.release_conn()
|
||||
prep = r.request.copy()
|
||||
if not hasattr(prep, '_cookies'):
|
||||
prep._cookies = cookies.RequestsCookieJar()
|
||||
cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
|
||||
prep.prepare_cookies(prep._cookies)
|
||||
|
||||
self.proxy_auth = auth.HTTPProxyAuth(self.proxy_username,
|
||||
self.proxy_password)
|
||||
prep = self.proxy_auth(prep)
|
||||
_r = r.connection.send(prep, **kwargs)
|
||||
_r.history.append(r)
|
||||
_r.request = prep
|
||||
|
||||
return _r
|
||||
|
||||
def _handle_digest_auth_407(self, r, kwargs):
|
||||
self.proxy_auth = http_proxy_digest.HTTPProxyDigestAuth(
|
||||
username=self.proxy_username,
|
||||
password=self.proxy_password)
|
||||
|
||||
try:
|
||||
self.auth.init_per_thread_state()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return self.proxy_auth.handle_407(r, **kwargs)
|
||||
|
||||
def handle_407(self, r, **kwargs):
|
||||
proxy_authenticate = r.headers.get('Proxy-Authenticate', '').lower()
|
||||
|
||||
if 'basic' in proxy_authenticate:
|
||||
return self._handle_basic_auth_407(r, kwargs)
|
||||
|
||||
if 'digest' in proxy_authenticate:
|
||||
return self._handle_digest_auth_407(r, kwargs)
|
||||
|
||||
def __call__(self, request):
|
||||
if self.proxy_auth is not None:
|
||||
request = self.proxy_auth(request)
|
||||
|
||||
try:
|
||||
self.pos = request.body.tell()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
request.register_hook('response', self.handle_407)
|
||||
return super(GuessProxyAuth, self).__call__(request)
|
||||
142 lib/requests_toolbelt/auth/handler.py Normal file
@@ -0,0 +1,142 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
requests_toolbelt.auth.handler
|
||||
==============================
|
||||
|
||||
This holds all of the implementation details of the Authentication Handler.
|
||||
|
||||
"""
|
||||
|
||||
from requests.auth import AuthBase, HTTPBasicAuth
|
||||
from requests.compat import urlparse, urlunparse
|
||||
|
||||
|
||||
class AuthHandler(AuthBase):
|
||||
|
||||
"""
|
||||
|
||||
The ``AuthHandler`` object takes a dictionary of domains paired with
|
||||
authentication strategies and will use this to determine which credentials
|
||||
to use when making a request. For example, you could do the following:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from requests import HTTPDigestAuth
|
||||
from requests_toolbelt.auth.handler import AuthHandler
|
||||
|
||||
import requests
|
||||
|
||||
auth = AuthHandler({
|
||||
'https://api.github.com': ('sigmavirus24', 'fakepassword'),
|
||||
'https://example.com': HTTPDigestAuth('username', 'password')
|
||||
})
|
||||
|
||||
r = requests.get('https://api.github.com/user', auth=auth)
|
||||
# => <Response [200]>
|
||||
r = requests.get('https://example.com/some/path', auth=auth)
|
||||
# => <Response [200]>
|
||||
|
||||
s = requests.Session()
|
||||
s.auth = auth
|
||||
r = s.get('https://api.github.com/user')
|
||||
# => <Response [200]>
|
||||
|
||||
.. warning::
|
||||
|
||||
:class:`requests.auth.HTTPDigestAuth` is not yet thread-safe. If you
|
||||
use :class:`AuthHandler` across multiple threads you should
|
||||
instantiate a new AuthHandler for each thread with a new
|
||||
HTTPDigestAuth instance for each thread.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, strategies):
|
||||
self.strategies = dict(strategies)
|
||||
self._make_uniform()
|
||||
|
||||
def __call__(self, request):
|
||||
auth = self.get_strategy_for(request.url)
|
||||
return auth(request)
|
||||
|
||||
def __repr__(self):
|
||||
return '<AuthHandler({0!r})>'.format(self.strategies)
|
||||
|
||||
def _make_uniform(self):
|
||||
existing_strategies = list(self.strategies.items())
|
||||
self.strategies = {}
|
||||
|
||||
for (k, v) in existing_strategies:
|
||||
self.add_strategy(k, v)
|
||||
|
||||
@staticmethod
|
||||
def _key_from_url(url):
|
||||
parsed = urlparse(url)
|
||||
return urlunparse((parsed.scheme.lower(),
|
||||
parsed.netloc.lower(),
|
||||
'', '', '', ''))
|
||||
|
||||
def add_strategy(self, domain, strategy):
|
||||
"""Add a new domain and authentication strategy.
|
||||
|
||||
:param str domain: The domain you wish to match against. For example:
|
||||
``'https://api.github.com'``
|
||||
:param str strategy: The authentication strategy you wish to use for
|
||||
that domain. For example: ``('username', 'password')`` or
|
||||
``requests.HTTPDigestAuth('username', 'password')``
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
a = AuthHandler({})
|
||||
a.add_strategy('https://api.github.com', ('username', 'password'))
|
||||
|
||||
"""
|
||||
# Turn tuples into Basic Authentication objects
|
||||
if isinstance(strategy, tuple):
|
||||
strategy = HTTPBasicAuth(*strategy)
|
||||
|
||||
key = self._key_from_url(domain)
|
||||
self.strategies[key] = strategy
|
||||
|
||||
def get_strategy_for(self, url):
|
||||
"""Retrieve the authentication strategy for a specified URL.
|
||||
|
||||
:param str url: The full URL you will be making a request against. For
|
||||
example, ``'https://api.github.com/user'``
|
||||
:returns: Callable that adds authentication to a request.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import requests
|
||||
a = AuthHandler({'example.com', ('foo', 'bar')})
|
||||
strategy = a.get_strategy_for('http://example.com/example')
|
||||
assert isinstance(strategy, requests.auth.HTTPBasicAuth)
|
||||
|
||||
"""
|
||||
key = self._key_from_url(url)
|
||||
return self.strategies.get(key, NullAuthStrategy())
|
||||
|
||||
def remove_strategy(self, domain):
|
||||
"""Remove the domain and strategy from the collection of strategies.
|
||||
|
||||
:param str domain: The domain you wish remove. For example,
|
||||
``'https://api.github.com'``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
a = AuthHandler({'example.com', ('foo', 'bar')})
|
||||
a.remove_strategy('example.com')
|
||||
assert a.strategies == {}
|
||||
|
||||
"""
|
||||
key = self._key_from_url(domain)
|
||||
if key in self.strategies:
|
||||
del self.strategies[key]
|
||||
|
||||
|
||||
class NullAuthStrategy(AuthBase):
|
||||
def __repr__(self):
|
||||
return '<NullAuthStrategy>'
|
||||
|
||||
def __call__(self, r):
|
||||
return r
|
||||
103 lib/requests_toolbelt/auth/http_proxy_digest.py Normal file
@@ -0,0 +1,103 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""The module containing HTTPProxyDigestAuth."""
|
||||
import re
|
||||
|
||||
from requests import cookies, utils
|
||||
|
||||
from . import _digest_auth_compat as auth
|
||||
|
||||
|
||||
class HTTPProxyDigestAuth(auth.HTTPDigestAuth):
|
||||
"""HTTP digest authentication between proxy
|
||||
|
||||
:param stale_rejects: The number of rejects indicate that:
|
||||
the client may wish to simply retry the request
|
||||
with a new encrypted response, without reprompting the user for a
|
||||
new username and password. i.e., retry build_digest_header
|
||||
:type stale_rejects: int
|
||||
"""
|
||||
_pat = re.compile(r'digest ', flags=re.IGNORECASE)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(HTTPProxyDigestAuth, self).__init__(*args, **kwargs)
|
||||
self.stale_rejects = 0
|
||||
|
||||
self.init_per_thread_state()
|
||||
|
||||
@property
|
||||
def stale_rejects(self):
|
||||
thread_local = getattr(self, '_thread_local', None)
|
||||
if thread_local is None:
|
||||
return self._stale_rejects
|
||||
return thread_local.stale_rejects
|
||||
|
||||
@stale_rejects.setter
|
||||
def stale_rejects(self, value):
|
||||
thread_local = getattr(self, '_thread_local', None)
|
||||
if thread_local is None:
|
||||
self._stale_rejects = value
|
||||
else:
|
||||
thread_local.stale_rejects = value
|
||||
|
||||
def init_per_thread_state(self):
|
||||
try:
|
||||
super(HTTPProxyDigestAuth, self).init_per_thread_state()
|
||||
except AttributeError:
|
||||
# If we're not on requests 2.8.0+ this method does not exist
|
||||
pass
|
||||
|
||||
def handle_407(self, r, **kwargs):
|
||||
"""Handle HTTP 407 only once, otherwise give up
|
||||
|
||||
:param r: current response
|
||||
:returns: responses, along with the new response
|
||||
"""
|
||||
if r.status_code == 407 and self.stale_rejects < 2:
|
||||
s_auth = r.headers.get("proxy-authenticate")
|
||||
if s_auth is None:
|
||||
raise IOError(
|
||||
"proxy server violated RFC 7235:"
|
||||
"407 response MUST contain header proxy-authenticate")
|
||||
elif not self._pat.match(s_auth):
|
||||
return r
|
||||
|
||||
self.chal = utils.parse_dict_header(
|
||||
self._pat.sub('', s_auth, count=1))
|
||||
|
||||
# if we present the user/passwd and still get rejected
|
||||
# http://tools.ietf.org/html/rfc2617#section-3.2.1
|
||||
if ('Proxy-Authorization' in r.request.headers and
|
||||
'stale' in self.chal):
|
||||
if self.chal['stale'].lower() == 'true': # try again
|
||||
self.stale_rejects += 1
|
||||
# wrong user/passwd
|
||||
elif self.chal['stale'].lower() == 'false':
|
||||
raise IOError("User or password is invalid")
|
||||
|
||||
# Consume content and release the original connection
|
||||
# to allow our new request to reuse the same one.
|
||||
r.content
|
||||
r.close()
|
||||
prep = r.request.copy()
|
||||
cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
|
||||
prep.prepare_cookies(prep._cookies)
|
||||
|
||||
prep.headers['Proxy-Authorization'] = self.build_digest_header(
|
||||
prep.method, prep.url)
|
||||
_r = r.connection.send(prep, **kwargs)
|
||||
_r.history.append(r)
|
||||
_r.request = prep
|
||||
|
||||
return _r
|
||||
else: # give up authenticate
|
||||
return r
|
||||
|
||||
def __call__(self, r):
|
||||
self.init_per_thread_state()
|
||||
# if we have nonce, then just use it, otherwise server will tell us
|
||||
if self.last_nonce:
|
||||
r.headers['Proxy-Authorization'] = self.build_digest_header(
|
||||
r.method, r.url
|
||||
)
|
||||
r.register_hook('response', self.handle_407)
|
||||
return r
|
||||
Some files were not shown because too many files have changed in this diff.