Merge branch 'master' into alpha
This commit is contained in:
10
addon.xml
10
addon.xml
@@ -12,14 +12,10 @@
|
||||
</extension>
|
||||
<extension point="kodi.context.item">
|
||||
<menu id="kodi.core.main">
|
||||
<item library="updatetvshow.py">
|
||||
<label>70269</label>
|
||||
<visible>String.IsEqual(ListItem.dbtype, tvshow) + !String.IsEmpty(ListItem.DBID)</visible>
|
||||
<item library="contextmenu.py">
|
||||
<label>90001</label>
|
||||
<visible>!String.StartsWith(ListItem.FileNameAndPath, plugin://plugin.video.kod/) + [ String.IsEqual(ListItem.dbtype, tvshow) | String.IsEqual(ListItem.dbtype, movie) | String.IsEqual(ListItem.dbtype, season) | String.IsEqual(ListItem.dbtype, episode) ]</visible>
|
||||
</item>
|
||||
<!-- <item library="externalsearch.py">-->
|
||||
<!-- <label>90001</label>-->
|
||||
<!-- <visible>!String.StartsWith(ListItem.FileNameAndPath, plugin://plugin.video.kod/) + [String.IsEqual(ListItem.dbtype,tvshow) | String.IsEqual(ListItem.dbtype,movie)]</visible>-->
|
||||
<!-- </item>-->
|
||||
</menu>
|
||||
</extension>
|
||||
<extension point="xbmc.addon.metadata">
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "altadefinizione01_link",
|
||||
"name": "Altadefinizione01 L",
|
||||
"active": true,
|
||||
"active": false,
|
||||
"language": ["ita","sub-ita"],
|
||||
"thumbnail": "altadefinizione01_L.png",
|
||||
"banner": "altadefinizione01_L.png",
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
# Canale per AnimeUnity
|
||||
# ------------------------------------------------------------
|
||||
|
||||
from lib.requests.sessions import session
|
||||
import requests, json, copy, inspect
|
||||
from core import support
|
||||
from platformcode import autorenumber
|
||||
@@ -115,8 +116,10 @@ def news(item):
|
||||
support.info()
|
||||
item.contentType = 'episode'
|
||||
itemlist = []
|
||||
import cloudscraper
|
||||
session = cloudscraper.create_scraper()
|
||||
|
||||
fullJs = json.loads(support.match(item, headers=headers, patron=r'items-json="([^"]+)"').match.replace('"','"'))
|
||||
fullJs = json.loads(support.match(session.get(item.url).text, headers=headers, patron=r'items-json="([^"]+)"', debug=True).match.replace('"','"'))
|
||||
js = fullJs['data']
|
||||
|
||||
for it in js:
|
||||
|
||||
@@ -30,6 +30,7 @@ def menu(item):
|
||||
item.contentType = ''
|
||||
action = 'peliculas'
|
||||
|
||||
|
||||
patronBlock = r'<div class="filter-header"><b>%s</b>(?P<block>.*?)<div class="filter-box">' % item.args
|
||||
patronMenu = r'<a class="[^"]+" data-state="[^"]+" (?P<other>[^>]+)>[^>]+></i>[^>]+></i>[^>]+></i>(?P<title>[^>]+)</a>'
|
||||
|
||||
@@ -89,7 +90,7 @@ def peliculas(item):
|
||||
patronBlock = r'<div id="%s"[^>]+>(?P<block>.*?)<div class="vistaDettagliata"' % item.args[1]
|
||||
patron = r'<li>\s*<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)" class="thumb">[^>]+>[^>]+>[^>]+>\s*[EePp]+\s*(?P<episode>\d+)[^>]+>\s<img src="(?P<thumb>[^"]+)"'
|
||||
else:
|
||||
patron = r'<div class="showStreaming"> +<b>(?P<title>[^<]+)[^>]+>[^>]+>\s*<span>Lingua:\s*(?P<lang>[^>]+)?>[<>br\s]+a href="(?P<url>[^"]+)"[^>]+>.*?--image-url:url\(/*(?P<thumb>[^\)]+).*?Anno di inizio</b>:\s*(?P<year>[0-9]{4})'
|
||||
patron = r'<div class="showStreaming">\s*<b>(?P<title>[^<]+)[^>]+>[^>]+>\s*<span>Lingua:\s*(?:DUB|JAP)?\s*(?P<lang>(?:SUB )?ITA)[^>]+>[<>br\s]+a href="(?P<url>[^"]+)"[^>]+>.*?--image-url:url\(/*(?P<thumb>[^\)]+).*?Anno di inizio</b>:\s*(?P<year>[0-9]{4})'
|
||||
patronNext = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
|
||||
|
||||
def itemHook(item):
|
||||
@@ -128,7 +129,6 @@ def findvideos(item):
|
||||
if 'vvvvid' in matches.data:
|
||||
itemlist.append(item.clone(action="play", title='VVVVID', url=support.match(matches.data, patron=r'(http://www.vvvvid[^"]+)').match, server='vvvvid'))
|
||||
else:
|
||||
# matches.matches.sort()
|
||||
support.info('VIDEO')
|
||||
for url in matches.matches:
|
||||
lang = url.split('/')[-2]
|
||||
@@ -139,8 +139,6 @@ def findvideos(item):
|
||||
quality = url.split('/')[-1].split('?')[0]
|
||||
url += '|User-Agent=' + support.httptools.get_user_agent() + '&Referer=' + url
|
||||
|
||||
itemlist.append(item.clone(action="play", title=language, url=url, contentLanguage = language, quality = quality, order = quality.replace('p','').zfill(4), server='directo',))
|
||||
|
||||
itemlist.sort(key=lambda x: (x.title, x.order), reverse=False)
|
||||
itemlist.append(item.clone(action="play", title='', url=url, contentLanguage = language, quality = quality, order = quality.replace('p','').zfill(4), server='directo',))
|
||||
return support.server(item, itemlist=itemlist)
|
||||
|
||||
|
||||
@@ -35,18 +35,17 @@ def mainlist(item):
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
support.info()
|
||||
#debug = True
|
||||
# debug = True
|
||||
#debugBlock = True
|
||||
# support.dbg()
|
||||
|
||||
if item.args != 'newest':
|
||||
patronBlock = r'<ul class="posts">(?P<block>.*)<\/ul>'
|
||||
patron = r'<li><a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title[^"]*">(?P<title>.+?)(?:\[(?P<lang>Sub-ITA)\])?(?:[ ]\[?(?P<quality>[HD]+)?\])?(?:[ ]\((?P<year>\d+)\)?)?<\/div>'
|
||||
patronNext = r'<a href="([^"]+)" >Pagina'
|
||||
|
||||
patronNext = r'<a href="([^"]+)[^>]+>Pagina'
|
||||
else:
|
||||
patronBlock = r'<ul class="posts">(?P<block>.*)<div class="clear[^"]*">'
|
||||
patron = r'<li>\s?<a href="(?P<url>[^"]+)" data-thumbnail="(?P<thumb>[^"]+)">.*?<div class="title[^"]*">(?P<title>.+?)(?:\s\[(?P<quality>HD)\])?<\/div><div class="episode[^"]*"[^>]+>(?P<episode>[^<(]+)(?:\((?P<lang>[a-zA-Z\-]+)\))?'
|
||||
pagination = ''
|
||||
|
||||
if item.args == 'search':
|
||||
action = 'select'
|
||||
@@ -58,7 +57,7 @@ def peliculas(item):
|
||||
action = 'select'
|
||||
|
||||
def itemHook(item):
|
||||
item.title = item.title.replace(r'-', ' ')
|
||||
item.title = item.title.replace(' - La Serie', '')
|
||||
return item
|
||||
|
||||
return locals()
|
||||
|
||||
@@ -22,10 +22,11 @@ def mainlist(item):
|
||||
def peliculas(item):
|
||||
# debug = True
|
||||
if item.args == 'alternative':
|
||||
patron = r'<a title="(?P<title>[^\(]+)\(\s*(?P<year>\d+)\)\s\D+(?P<quality>\d+p) ... (?P<lang>[^ ]+).*?[^"]+"\s*href="(?P<url>[^"]+)'
|
||||
pagination = ''
|
||||
patron = r'<a title="(?P<title>[^\(]+)\(\s*(?P<year>\d+)\)\s\D+(?P<quality>\d+p).{3}(?P<lang>[^ ]+).*?[^"]+"\s*href="(?P<url>[^"]+)'
|
||||
else:
|
||||
patron = r'<a href="(?P<url>[^"]+)" (?:rel="?[0-9]+"?)? title="(?P<title>[^\(]+)(?!\()\s*\((?P<year>\d+)\)\s(?:[^\]]+\])?\D+(?P<quality>\d+p).{3}(?P<lang>[^ ]+).*?<img id="?cov"?.*?src="(?P<thumb>[^"]+)'
|
||||
patronNext = r'rel="?next"? href="([^"]+)"'
|
||||
patronNext = r'current(?:[^>]*>){2}\s*<a class="[^"]+"\s* href="([^"]+)'
|
||||
return locals()
|
||||
|
||||
|
||||
|
||||
@@ -89,4 +89,11 @@ def newest(categoria):
|
||||
|
||||
def findvideos(item):
|
||||
info()
|
||||
return support.server(item)
|
||||
urls = []
|
||||
data = support.match(item).data
|
||||
urls += support.match(data, patron=r'id="urlEmbed" value="([^"]+)').matches
|
||||
matches = support.match(data, patron=r'<iframe.*?src="([^"]+)').matches
|
||||
for m in matches:
|
||||
if 'youtube' not in m and not m.endswith('.js'):
|
||||
urls += support.match(m, patron=r'data-link="([^"]+)').matches
|
||||
return support.server(item, urls)
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "mondoserietv",
|
||||
"name": "MondoSerieTV",
|
||||
"active": true,
|
||||
"active": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "mondoserietv.png",
|
||||
"bannermenu": "mondoserietv.png",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "serietvonline",
|
||||
"name": "SerieTvOnline",
|
||||
"active": true,
|
||||
"active": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "serietvonline.png",
|
||||
"bannermenu": "serietvonline.png",
|
||||
|
||||
@@ -10,13 +10,13 @@ from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
|
||||
def findhost(url):
|
||||
permUrl = httptools.downloadpage(url).data
|
||||
host = scrapertools.find_single_match(permUrl, r'<a href="([^"]+)')
|
||||
return host
|
||||
# def findhost(url):
|
||||
# permUrl = httptools.downloadpage(url).data
|
||||
# host = scrapertools.find_single_match(permUrl, r'<a href="([^"]+)')
|
||||
# return host
|
||||
|
||||
|
||||
host = config.get_channel_url(findhost)
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
player_iframe = r'<iframe.*?src="([^"]+)"[^>]+></iframe>\s*<div class="player'
|
||||
@@ -133,7 +133,7 @@ def search(item, texto):
|
||||
@support.scrape
|
||||
def newest(categoria):
|
||||
if categoria == 'series':
|
||||
item = Item(url=host + '/aggiornamenti-giornalieri-serie-tv-2')
|
||||
item = Item(url=host + '/aggiornamenti-giornalieri-serie-tv')
|
||||
data = support.match(item).data.replace('<u>','').replace('</u>','')
|
||||
item.contentType = 'episode'
|
||||
patronBlock = r'Aggiornamenti Giornalieri Serie TV.*?<div class="sp-body folded">(?P<block>.*?)</div>'
|
||||
|
||||
11
channels/tapmovie.json
Normal file
11
channels/tapmovie.json
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"id": "tapmovie",
|
||||
"name": "Tap Movie",
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": true,
|
||||
"thumbnail": "tapmovie.png",
|
||||
"banner": "tapmovie.png",
|
||||
"categories": ["movie", "tvshow", "anime"],
|
||||
"not_active": ["include_in_newest"],
|
||||
"settings": []
|
||||
}
|
||||
102
channels/tapmovie.py
Normal file
102
channels/tapmovie.py
Normal file
@@ -0,0 +1,102 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per 'dvdita'
|
||||
|
||||
from core import support, httptools
|
||||
from core.item import Item
|
||||
import sys
|
||||
if sys.version_info[0] >= 3: from concurrent import futures
|
||||
else: from concurrent_py2 import futures
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
api_url = '/api/v2/'
|
||||
per_page = 24
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
film = ['/browse/movie']
|
||||
tvshow = ['/browse/tvshow']
|
||||
search = ''
|
||||
|
||||
# [Voce Menu,['url','action','args',contentType]
|
||||
top = [('Generi', ['', 'genres', '', 'undefined'])]
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def episodios(item):
|
||||
support.info(item)
|
||||
itemlist = []
|
||||
|
||||
with futures.ThreadPoolExecutor() as executor:
|
||||
thL = []
|
||||
for season in httptools.downloadpage(host + api_url + 'tvshow', post={'tvshow_id': item.id}).json.get('season', []):
|
||||
season_id = season['season_number']
|
||||
thL.append(executor.submit(httptools.downloadpage, host + api_url + 'episodes', post={'tvshow_id': item.id, 'season_id': season_id}))
|
||||
for th in futures.as_completed(thL):
|
||||
for episode in th.result().json.get('episodes', []):
|
||||
itemlist.append(item.clone(action="findvideos", contentSeason=episode['season_id'], contentEpisodeNumber=episode['episode_number'], id=item.id,
|
||||
title=episode['season_id']+'x'+episode['episode_number'], contentType='episode'))
|
||||
support.scraper.sort_episode_list(itemlist)
|
||||
support.videolibrary(itemlist, item)
|
||||
support.download(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def genres(item):
|
||||
itemlist = []
|
||||
for n, genre in enumerate(httptools.downloadpage(host + api_url + 'categories', post={}).json.get('categories', [])):
|
||||
itemlist.append(item.clone(action="peliculas", genre=genre.get('name'), title=genre.get('value'), n=n))
|
||||
return support.thumb(itemlist, genre=True)
|
||||
|
||||
|
||||
def peliculas(item, text=''):
|
||||
support.info('search', item)
|
||||
itemlist = []
|
||||
filter_type = False
|
||||
if item.genre:
|
||||
text = item.genre
|
||||
cmd = 'search/category'
|
||||
else:
|
||||
cmd = 'search'
|
||||
if not text:
|
||||
filter_type = True
|
||||
|
||||
try:
|
||||
page = int(item.url.split('?p=')[1])
|
||||
except:
|
||||
page = 1
|
||||
results = httptools.downloadpage(host + api_url + cmd, post={'search': text, 'page': page}).json.get('results', [])
|
||||
for result in results:
|
||||
contentType = 'movie' if result['type'] == 'FILM' else 'tvshow'
|
||||
if not filter_type or (filter_type and contentType == item.contentType):
|
||||
itemlist.append(item.clone(id=result.get('id'), title=result.get('title'), contentTitle=result.get('title'),
|
||||
contentSerieName='' if contentType == 'movie' else result.get('title'),
|
||||
contentPlot=result.get('description'), thumbnail=result.get('poster'),
|
||||
fanart=result.get('backdrop'), year=result.get('year'), action='episodios' if contentType == 'tvshow' else 'findvideos',
|
||||
url='{}/{}/{}-{}'.format('https://filmigratis.org', contentType, result.get('id'), support.scrapertools.slugify(result.get('title'))),
|
||||
contentType=contentType))
|
||||
support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
if len(results) >= per_page:
|
||||
page += 1
|
||||
support.nextPage(itemlist, item, next_page='https://filmigratis.org/category/' + str(item.n) + '/' + item.genre + '?p=' + str(page))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, text):
|
||||
return peliculas(item, text)
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
itemlist = []
|
||||
if not item.contentSeason: # film
|
||||
json = httptools.downloadpage(host + api_url + 'movie', post={'movie_id': item.id}).json
|
||||
else:
|
||||
json = httptools.downloadpage(host + api_url + 'episode/links', post={'tvshow_id': item.id, 'season_id': item.contentSeason, 'episode_id': item.contentEpisodeNumber}).json
|
||||
|
||||
for i in json.get('links', []) + json.get('special', []):
|
||||
itemlist.append(Item(url=i.get('link')))
|
||||
return support.server(item, itemlist=itemlist)
|
||||
@@ -3,7 +3,7 @@
|
||||
# Canale per ToonItalia
|
||||
# ------------------------------------------------------------
|
||||
|
||||
from core import support
|
||||
from core import scrapertools, support
|
||||
import sys
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
@@ -76,7 +76,7 @@ def peliculas(item):
|
||||
else:
|
||||
patronBlock = '<main[^>]+>(?P<block>.*)</main>'
|
||||
# patron = r'<a href="(?P<url>[^"]+)" rel="bookmark">(?P<title>[^<]+)</a>[^>]+>[^>]+>[^>]+><img.*?src="(?P<thumb>[^"]+)".*?<p>(?P<plot>[^<]+)</p>.*?<span class="cat-links">Pubblicato in.*?.*?(?P<type>(?:[Ff]ilm|</artic))[^>]+>'
|
||||
patron = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)</a>[^>]+>[^>]+>[^>]+><img.*?src="(?P<thumb>[^"]+)".*?<p>(?P<plot>[^<]+)</p>.*?tag">.*?(?P<type>(?:[Ff]ilm|</art|Serie Tv))'
|
||||
patron = r'<a href="(?P<url>[^"]+)" rel="bookmark">(?P<title>[^<]+)</a>(:?[^>]+>){3}(?:<img.*?src="(?P<thumb>[^"]+)")?.*?<p>(?P<plot>[^<]+)</p>.*?tag">.*?(?P<type>(?:[Ff]ilm|</art|Serie Tv))'
|
||||
typeContentDict={'movie':['film']}
|
||||
typeActionDict={'findvideos':['film']}
|
||||
patronNext = '<a class="next page-numbers" href="([^"]+)">'
|
||||
@@ -93,21 +93,27 @@ def peliculas(item):
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
anime = True
|
||||
def get_ep(item):
|
||||
find = ''
|
||||
data = support.match(item, headers=headers).data
|
||||
if 'https://vcrypt.net' in data:
|
||||
patron = r'(?: /> |<p>)(?P<episode>\d+.\d+)?(?: – )?(?P<title>[^<]+)<a (?P<data>.*?)(?:<br|</p)'
|
||||
else:
|
||||
patron = r'<br />\s*<a href="(?P<url>[^"]+)" target="_blank" rel="noopener[^>]+>(?P<episode>\d+.\d+)?(?: – )?(?P<title>[^<]+)</a>'
|
||||
match = support.match(data, patron=r'(?: /> |<p>)(?:(?P<season>\d+)×)?(?P<episode>\d+)(?:\s+–\s+)?(?P<title>[^<]+)<a (?P<data>.*?)(?:<br|</p)').matches
|
||||
if match:
|
||||
for m in match:
|
||||
find += '{}{:02d}|{}|{}|'.format(m[0]+'x' if m[0] else '', int(m[1]), clean_title(m[2]), m[3])
|
||||
return find
|
||||
|
||||
def itemHook(item):
|
||||
item.title = support.re.sub(r'\[B\]|\[/B\]', '', item.title)
|
||||
item.title = item.title.replace('_',' ').replace('–','-').replace('×','x').replace('-','-').replace(' ',' ')
|
||||
item.title = support.re.sub(item.fulltitle + ' - ','',item.title)
|
||||
item.title = support.typo(item.title.strip(' -'),'bold')
|
||||
return item
|
||||
data = get_ep(item)
|
||||
patron = r'(?P<episode>[^|]+)\|(?P<title>[^|]+)\|(?P<data>[^|]+)\|'
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
return support.server(item, item.data if item.contentType != 'movie' else support.match(item.url, headers=headers).data )
|
||||
|
||||
|
||||
def clean_title(title):
|
||||
title = scrapertools.unescape(title)
|
||||
title = title.replace('_',' ').replace('–','-').replace(' ',' ')
|
||||
title = title.strip(' - ')
|
||||
return title
|
||||
63
contextmenu.py
Normal file
63
contextmenu.py
Normal file
@@ -0,0 +1,63 @@
|
||||
from platformcode import config, logger
|
||||
import xbmc, sys, xbmcgui, os
|
||||
|
||||
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
|
||||
sys.path.insert(0, librerias)
|
||||
|
||||
from core import jsontools, support
|
||||
|
||||
addon_id = config.get_addon_core().getAddonInfo('id')
|
||||
|
||||
LOCAL_FILE = os.path.join(config.get_runtime_path(), "platformcode/contextmenu/contextmenu.json")
|
||||
f = open(LOCAL_FILE)
|
||||
contextmenu_settings = jsontools.load(open(LOCAL_FILE).read())
|
||||
f.close()
|
||||
|
||||
|
||||
def build_menu():
|
||||
tmdbid = xbmc.getInfoLabel('ListItem.Property(tmdb_id)')
|
||||
mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
|
||||
title = xbmc.getInfoLabel('ListItem.Title')
|
||||
year = xbmc.getInfoLabel('ListItem.Year')
|
||||
imdb = xbmc.getInfoLabel('ListItem.IMDBNumber')
|
||||
filePath = xbmc.getInfoLabel('ListItem.FileNameAndPath')
|
||||
containerPath = xbmc.getInfoLabel('Container.FolderPath')
|
||||
|
||||
logstr = "Selected ListItem is: 'IMDB: {}' - TMDB: {}' - 'Title: {}' - 'Year: {}'' - 'Type: {}'".format(imdb, tmdbid, title, year, mediatype)
|
||||
logger.info(logstr)
|
||||
logger.info(filePath)
|
||||
logger.info(containerPath)
|
||||
|
||||
contextmenuitems = []
|
||||
contextmenuactions = []
|
||||
|
||||
for itemmodule in contextmenu_settings:
|
||||
logger.debug('check contextmenu', itemmodule)
|
||||
module = __import__(itemmodule, None, None, [itemmodule])
|
||||
|
||||
logger.info('Add contextmenu item ->', itemmodule)
|
||||
module_item_actions = module.get_menu_items()
|
||||
contextmenuitems.extend([item for item, fn in module_item_actions])
|
||||
contextmenuactions.extend([fn for item, fn in module_item_actions])
|
||||
|
||||
if len(contextmenuitems) == 0:
|
||||
logger.info('No contextmodule found, build an empty one')
|
||||
contextmenuitems.append(empty_item())
|
||||
contextmenuactions.append(lambda: None)
|
||||
|
||||
ret = xbmcgui.Dialog().contextmenu(contextmenuitems)
|
||||
|
||||
if ret > -1:
|
||||
logger.info('Contextmenu module index', ret, ', label=' + contextmenuitems[ret])
|
||||
contextmenuactions[ret]()
|
||||
|
||||
|
||||
def empty_item():
|
||||
return config.get_localized_string(90004)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
build_menu()
|
||||
|
||||
|
||||
|
||||
@@ -34,8 +34,9 @@ def hdpass_get_servers(item):
|
||||
for mir_url, srv in scrapertools.find_multiple_matches(mir, patron_option):
|
||||
mir_url = scrapertools.decodeHtmlentities(mir_url)
|
||||
logger.debug(mir_url)
|
||||
it = item.clone(action="play", quality=quality, title=srv, server=srv, url= mir_url)
|
||||
if not servertools.get_server_parameters(srv.lower()): it = hdpass_get_url(it)[0] # do not exists or it's empty
|
||||
it = hdpass_get_url(item.clone(action='play', quality=quality, url=mir_url))[0]
|
||||
# it = item.clone(action="play", quality=quality, title=srv, server=srv, url= mir_url)
|
||||
# if not servertools.get_server_parameters(srv.lower()): it = hdpass_get_url(it)[0] # do not exists or it's empty
|
||||
ret.append(it)
|
||||
return ret
|
||||
# Carica la pagina
|
||||
@@ -1143,7 +1144,10 @@ def nextPage(itemlist, item, data='', patron='', function_or_level=1, next_page=
|
||||
if next_page != "":
|
||||
if resub: next_page = re.sub(resub[0], resub[1], next_page)
|
||||
if 'http' not in next_page:
|
||||
if '/' in next_page:
|
||||
next_page = scrapertools.find_single_match(item.url, 'https?://[a-z0-9.-]+') + (next_page if next_page.startswith('/') else '/' + next_page)
|
||||
else:
|
||||
next_page = '/'.join(item.url.split('/')[:-1]) + '/' + next_page
|
||||
next_page = next_page.replace('&', '&')
|
||||
logger.debug('NEXT= ', next_page)
|
||||
itemlist.append(
|
||||
@@ -1370,9 +1374,21 @@ def addQualityTag(item, itemlist, data, patron):
|
||||
info('nessun tag qualità trovato')
|
||||
|
||||
def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False):
|
||||
from core import jsontools
|
||||
|
||||
video_urls = []
|
||||
block = scrapertools.find_single_match(data, r'sources:\s*\[([^\]]+)\]') if not dataIsBlock else data
|
||||
block = scrapertools.find_single_match(data, r'sources:\s*([^\]]+\])') if not dataIsBlock else data
|
||||
if block:
|
||||
json = jsontools.load(block)
|
||||
if json:
|
||||
sources = []
|
||||
for s in json:
|
||||
if 'file' in s.keys():
|
||||
src = s['file']
|
||||
else:
|
||||
src = s['src']
|
||||
sources.append((src, s.get('label')))
|
||||
else:
|
||||
if 'file:' in block:
|
||||
sources = scrapertools.find_multiple_matches(block, r'file:\s*"([^"]+)"(?:,label:\s*"([^"]+)")?')
|
||||
elif 'src:' in block:
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
import xbmc, sys, xbmcgui, os
|
||||
from platformcode import config, logger
|
||||
|
||||
# incliuding folder libraries
|
||||
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
|
||||
sys.path.insert(0, librerias)
|
||||
|
||||
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
|
||||
def execute_search():
|
||||
"""
|
||||
Gather the selected ListItem's attributes in order to compute the `Item` parameters
|
||||
and perform the KOD's globalsearch.
|
||||
Globalsearch will be executed specifing the content-type of the selected ListItem
|
||||
|
||||
NOTE: this method needs the DBTYPE and TMDB_ID specified as ListItem's properties
|
||||
"""
|
||||
|
||||
# These following lines are commented and keep in the code just as reminder.
|
||||
# In future, they could be used to filter the search outcome
|
||||
|
||||
# ADDON: maybe can we know if the current windows is related to a specific addon?
|
||||
# we could skip the ContextMenu if we already are in KOD's window
|
||||
|
||||
tmdbid = xbmc.getInfoLabel('ListItem.Property(tmdb_id)')
|
||||
mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
|
||||
title = xbmc.getInfoLabel('ListItem.Title')
|
||||
year = xbmc.getInfoLabel('ListItem.Year')
|
||||
imdb = xbmc.getInfoLabel('ListItem.IMDBNumber')
|
||||
# folderPath = xbmc.getInfoLabel('Container.FolderPath')
|
||||
# filePath = xbmc.getInfoLabel('ListItem.FileNameAndPath')
|
||||
# logger.info("****")
|
||||
# logger.info( xbmc.getCondVisibility("String.Contains(Container.FolderPath, 'plugin.video.kod')") )
|
||||
# logger.info( xbmc.getCondVisibility("String.Contains(ListItem.FileNameAndPath, 'plugin.video.kod')") )
|
||||
# logger.info( xbmc.getCondVisibility("String.IsEqual(ListItem.dbtype,tvshow)") )
|
||||
# logger.info( xbmc.getCondVisibility("String.IsEqual(ListItem.dbtype,movie)") )
|
||||
# logger.info("****")
|
||||
|
||||
# visible = xbmc.getCondVisibility("!String.StartsWith(ListItem.FileNameAndPath, 'plugin://plugin.video.kod/') + [String.IsEqual(ListItem.dbtype,tvshow) | String.IsEqual(ListItem.dbtype,movie)]")
|
||||
|
||||
logstr = "Selected ListItem is: 'IMDB: {}' - TMDB: {}' - 'Title: {}' - 'Year: {}'' - 'Type: {}'".format(imdb, tmdbid, title, year, mediatype)
|
||||
logger.info(logstr)
|
||||
|
||||
if not tmdbid and imdb:
|
||||
logger.info('No TMDBid found. Try to get by IMDB')
|
||||
it = Item(contentType= mediatype, infoLabels={'imdb_id' : imdb})
|
||||
tmdb.set_infoLabels(it)
|
||||
tmdbid = it.infoLabels.get('tmdb_id', '')
|
||||
|
||||
if not tmdbid:
|
||||
logger.info('No TMDBid found. Try to get by Title/Year')
|
||||
it = Item(contentTitle= title, contentType= mediatype, infoLabels={'year' : year})
|
||||
tmdb.set_infoLabels(it)
|
||||
tmdbid = it.infoLabels.get('tmdb_id', '')
|
||||
|
||||
|
||||
item = Item(
|
||||
action="Search",
|
||||
channel="globalsearch",
|
||||
contentType= mediatype,
|
||||
mode="search",
|
||||
text= title,
|
||||
type= mediatype,
|
||||
infoLabels= {
|
||||
'tmdb_id': tmdbid,
|
||||
'year': year
|
||||
},
|
||||
folder= False
|
||||
)
|
||||
|
||||
logger.info("Invoking Item: {}".format(item.tostring()))
|
||||
|
||||
itemurl = item.tourl()
|
||||
xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + itemurl + ")")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
execute_search()
|
||||
846
lib/cloudscraper/__init__.py
Normal file
846
lib/cloudscraper/__init__.py
Normal file
@@ -0,0 +1,846 @@
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
import logging
|
||||
import re
|
||||
import requests
|
||||
import sys
|
||||
import ssl
|
||||
|
||||
from collections import OrderedDict
|
||||
from copy import deepcopy
|
||||
|
||||
from requests.adapters import HTTPAdapter
|
||||
from requests.sessions import Session
|
||||
from requests_toolbelt.utils import dump
|
||||
|
||||
from time import sleep
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
try:
|
||||
import brotli
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
try:
|
||||
import copyreg
|
||||
except ImportError:
|
||||
import copy_reg as copyreg
|
||||
|
||||
try:
|
||||
from HTMLParser import HTMLParser
|
||||
except ImportError:
|
||||
if sys.version_info >= (3, 4):
|
||||
import html
|
||||
else:
|
||||
from html.parser import HTMLParser
|
||||
|
||||
try:
|
||||
from urlparse import urlparse, urljoin
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse, urljoin
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
from .exceptions import (
|
||||
CloudflareLoopProtection,
|
||||
CloudflareCode1020,
|
||||
CloudflareIUAMError,
|
||||
CloudflareSolveError,
|
||||
CloudflareChallengeError,
|
||||
CloudflareCaptchaError,
|
||||
CloudflareCaptchaProvider
|
||||
)
|
||||
|
||||
from .interpreters import JavaScriptInterpreter
|
||||
from .captcha import Captcha
|
||||
from .user_agent import User_Agent
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
__version__ = '1.2.58'
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class CipherSuiteAdapter(HTTPAdapter):
|
||||
|
||||
__attrs__ = [
|
||||
'ssl_context',
|
||||
'max_retries',
|
||||
'config',
|
||||
'_pool_connections',
|
||||
'_pool_maxsize',
|
||||
'_pool_block',
|
||||
'source_address'
|
||||
]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.ssl_context = kwargs.pop('ssl_context', None)
|
||||
self.cipherSuite = kwargs.pop('cipherSuite', None)
|
||||
self.source_address = kwargs.pop('source_address', None)
|
||||
|
||||
if self.source_address:
|
||||
if isinstance(self.source_address, str):
|
||||
self.source_address = (self.source_address, 0)
|
||||
|
||||
if not isinstance(self.source_address, tuple):
|
||||
raise TypeError(
|
||||
"source_address must be IP address string or (ip, port) tuple"
|
||||
)
|
||||
|
||||
if not self.ssl_context:
|
||||
self.ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
|
||||
self.ssl_context.set_ciphers(self.cipherSuite)
|
||||
self.ssl_context.set_ecdh_curve('prime256v1')
|
||||
self.ssl_context.options |= (ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)
|
||||
|
||||
super(CipherSuiteAdapter, self).__init__(**kwargs)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def init_poolmanager(self, *args, **kwargs):
|
||||
kwargs['ssl_context'] = self.ssl_context
|
||||
kwargs['source_address'] = self.source_address
|
||||
return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def proxy_manager_for(self, *args, **kwargs):
|
||||
kwargs['ssl_context'] = self.ssl_context
|
||||
kwargs['source_address'] = self.source_address
|
||||
return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class CloudScraper(Session):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.debug = kwargs.pop('debug', False)
|
||||
self.delay = kwargs.pop('delay', None)
|
||||
self.cipherSuite = kwargs.pop('cipherSuite', None)
|
||||
self.ssl_context = kwargs.pop('ssl_context', None)
|
||||
self.interpreter = kwargs.pop('interpreter', 'native')
|
||||
self.captcha = kwargs.pop('captcha', {})
|
||||
self.requestPreHook = kwargs.pop('requestPreHook', None)
|
||||
self.requestPostHook = kwargs.pop('requestPostHook', None)
|
||||
self.source_address = kwargs.pop('source_address', None)
|
||||
self.doubleDown = kwargs.pop('doubleDown', True)
|
||||
|
||||
self.allow_brotli = kwargs.pop(
|
||||
'allow_brotli',
|
||||
True if 'brotli' in sys.modules.keys() else False
|
||||
)
|
||||
|
||||
self.user_agent = User_Agent(
|
||||
allow_brotli=self.allow_brotli,
|
||||
browser=kwargs.pop('browser', None)
|
||||
)
|
||||
|
||||
self._solveDepthCnt = 0
|
||||
self.solveDepth = kwargs.pop('solveDepth', 3)
|
||||
|
||||
super(CloudScraper, self).__init__(*args, **kwargs)
|
||||
|
||||
# pylint: disable=E0203
|
||||
if 'requests' in self.headers['User-Agent']:
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Set a random User-Agent if no custom User-Agent has been set
|
||||
# ------------------------------------------------------------------------------- #
|
||||
self.headers = self.user_agent.headers
|
||||
if not self.cipherSuite:
|
||||
self.cipherSuite = self.user_agent.cipherSuite
|
||||
|
||||
if isinstance(self.cipherSuite, list):
|
||||
self.cipherSuite = ':'.join(self.cipherSuite)
|
||||
|
||||
self.mount(
|
||||
'https://',
|
||||
CipherSuiteAdapter(
|
||||
cipherSuite=self.cipherSuite,
|
||||
ssl_context=self.ssl_context,
|
||||
source_address=self.source_address
|
||||
)
|
||||
)
|
||||
|
||||
# purely to allow us to pickle dump
|
||||
copyreg.pickle(ssl.SSLContext, lambda obj: (obj.__class__, (obj.protocol,)))
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Allow us to pickle our session back with all variables
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def __getstate__(self):
|
||||
return self.__dict__
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Allow replacing actual web request call via subclassing
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def perform_request(self, method, url, *args, **kwargs):
|
||||
return super(CloudScraper, self).request(method, url, *args, **kwargs)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Raise an Exception with no stacktrace and reset depth counter.
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def simpleException(self, exception, msg):
|
||||
self._solveDepthCnt = 0
|
||||
sys.tracebacklimit = 0
|
||||
raise exception(msg)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# debug the request via the response
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
def debugRequest(req):
    """Dump the full request/response exchange to stdout for debugging."""
    try:
        dumped = dump.dump_all(req)
        print(dumped.decode('utf-8', errors='backslashreplace'))
    except ValueError as e:
        # dump_all refuses some response objects; report instead of crashing.
        print("Debug Error: {}".format(getattr(e, 'message', e)))
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Unescape / decode html entities
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def unescape(html_text):
|
||||
if sys.version_info >= (3, 0):
|
||||
if sys.version_info >= (3, 4):
|
||||
return html.unescape(html_text)
|
||||
|
||||
return HTMLParser().unescape(html_text)
|
||||
|
||||
return HTMLParser().unescape(html_text)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Decode Brotli on older versions of urllib3 manually
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def decodeBrotli(self, resp):
    """Manually decompress a Brotli ('br') body when urllib3 is too old
    (< 1.25.1) to do it transparently; newer urllib3 handles 'br' itself.

    Returns the (possibly mutated) response object.
    """
    # NOTE(review): this is a lexicographic string comparison, which
    # mis-orders versions such as '1.3' vs '1.25.1' -- confirm before
    # relying on it for future urllib3 releases.
    if requests.packages.urllib3.__version__ < '1.25.1' and resp.headers.get('Content-Encoding') == 'br':
        if self.allow_brotli and resp._content:
            # Replace the raw body in place with the decompressed bytes.
            resp._content = brotli.decompress(resp.content)
        else:
            logging.warning(
                'You\'re running urllib3 {}, Brotli content detected, '
                'Which requires manual decompression, '
                'But option allow_brotli is set to False, '
                'We will not continue to decompress.'.format(requests.packages.urllib3.__version__)
            )

    return resp
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Our hijacker request function
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def request(self, method, url, *args, **kwargs):
    """Hijacked requests.Session.request: performs the request, then
    detects and (recursively) solves Cloudflare v1 challenges.

    Raises CloudflareLoopProtection once solveDepth consecutive solve
    attempts have failed.
    """
    # pylint: disable=E0203
    # Remember per-call proxies so challenge re-requests reuse them.
    if kwargs.get('proxies') and kwargs.get('proxies') != self.proxies:
        self.proxies = kwargs.get('proxies')

    # ------------------------------------------------------------------------------- #
    # Pre-Hook the request via user defined function.
    # ------------------------------------------------------------------------------- #

    if self.requestPreHook:
        (method, url, args, kwargs) = self.requestPreHook(
            self,
            method,
            url,
            *args,
            **kwargs
        )

    # ------------------------------------------------------------------------------- #
    # Make the request via requests (Brotli-decoded for old urllib3).
    # ------------------------------------------------------------------------------- #

    response = self.decodeBrotli(
        self.perform_request(method, url, *args, **kwargs)
    )

    # ------------------------------------------------------------------------------- #
    # Debug the request via the Response object.
    # ------------------------------------------------------------------------------- #

    if self.debug:
        self.debugRequest(response)

    # ------------------------------------------------------------------------------- #
    # Post-Hook the request aka Post-Hook the response via user defined function.
    # ------------------------------------------------------------------------------- #

    if self.requestPostHook:
        response = self.requestPostHook(self, response)

        # Re-dump after the hook in case it altered the response.
        if self.debug:
            self.debugRequest(response)

    # Check if Cloudflare anti-bot is on
    if self.is_Challenge_Request(response):
        # ------------------------------------------------------------------------------- #
        # Try to solve the challenge and send it back
        # ------------------------------------------------------------------------------- #

        if self._solveDepthCnt >= self.solveDepth:
            _ = self._solveDepthCnt
            self.simpleException(
                CloudflareLoopProtection,
                "!!Loop Protection!! We have tried to solve {} time(s) in a row.".format(_)
            )

        self._solveDepthCnt += 1

        response = self.Challenge_Response(response, **kwargs)
    else:
        # A non-challenge, non-throttled response resets the loop counter.
        if not response.is_redirect and response.status_code not in [429, 503]:
            self._solveDepthCnt = 0

    return response
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# check if the response contains a valid Cloudflare Bot Fight Mode challenge
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def is_BFM_Challenge(resp):
|
||||
try:
|
||||
return (
|
||||
resp.headers.get('Server', '').startswith('cloudflare')
|
||||
and re.search(
|
||||
r"\/cdn-cgi\/bm\/cv\/\d+\/api\.js.*?"
|
||||
r"window\['__CF\$cv\$params'\]\s*=\s*{",
|
||||
resp.text,
|
||||
re.M | re.S
|
||||
)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# check if the response contains a valid Cloudflare challenge
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def is_IUAM_Challenge(resp):
|
||||
try:
|
||||
return (
|
||||
resp.headers.get('Server', '').startswith('cloudflare')
|
||||
and resp.status_code in [429, 503]
|
||||
and re.search(
|
||||
r'<form .*?="challenge-form" action="/.*?__cf_chl_jschl_tk__=\S+"',
|
||||
resp.text,
|
||||
re.M | re.S
|
||||
)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# check if the response contains new Cloudflare challenge
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def is_New_IUAM_Challenge(resp):
|
||||
try:
|
||||
return (
|
||||
resp.headers.get('Server', '').startswith('cloudflare')
|
||||
and resp.status_code in [429, 503]
|
||||
and re.search(
|
||||
r'cpo.src\s*=\s*"/cdn-cgi/challenge-platform/\S+orchestrate/jsch/v1',
|
||||
resp.text,
|
||||
re.M | re.S
|
||||
)
|
||||
and re.search(r'window._cf_chl_enter\s*[\(=]', resp.text, re.M | re.S)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# check if the response contains a v2 hCaptcha Cloudflare challenge
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
def is_New_Captcha_Challenge(resp):
    """Truthy when *resp* is the newer (v2) Cloudflare hCaptcha challenge,
    which this free version cannot solve."""
    try:
        # Must first qualify as a Captcha challenge at all.
        if not CloudScraper.is_Captcha_Challenge(resp):
            return False
        # Then require both v2 orchestrator markers.
        return (
            re.search(
                r'cpo.src\s*=\s*"/cdn-cgi/challenge-platform/\S+orchestrate/captcha/v1',
                resp.text,
                re.M | re.S
            )
            and re.search(r'\s*id="trk_captcha_js"', resp.text, re.M | re.S)
        )
    except AttributeError:
        return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# check if the response contains a Cloudflare hCaptcha challenge
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def is_Captcha_Challenge(resp):
|
||||
try:
|
||||
return (
|
||||
resp.headers.get('Server', '').startswith('cloudflare')
|
||||
and resp.status_code == 403
|
||||
and re.search(
|
||||
r'action="/\S+__cf_chl_captcha_tk__=\S+',
|
||||
resp.text,
|
||||
re.M | re.DOTALL
|
||||
)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# check if the response contains Firewall 1020 Error
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def is_Firewall_Blocked(resp):
|
||||
try:
|
||||
return (
|
||||
resp.headers.get('Server', '').startswith('cloudflare')
|
||||
and resp.status_code == 403
|
||||
and re.search(
|
||||
r'<span class="cf-error-code">1020</span>',
|
||||
resp.text,
|
||||
re.M | re.DOTALL
|
||||
)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Wrapper for is_Captcha_Challenge, is_IUAM_Challenge, is_Firewall_Blocked
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def is_Challenge_Request(self, resp):
    """Return True when *resp* carries a solvable Cloudflare v1 challenge.

    Unsolvable conditions (1020 firewall blocks, version-2 challenges)
    raise via simpleException instead of returning.
    """
    # Hard stop: firewall block can never be solved.
    if self.is_Firewall_Blocked(resp):
        self.simpleException(
            CloudflareCode1020,
            'Cloudflare has blocked this request (Code 1020 Detected).'
        )

    # Version-2 challenges are not supported in the opensource build.
    if self.is_New_Captcha_Challenge(resp):
        self.simpleException(
            CloudflareChallengeError,
            'Detected a Cloudflare version 2 Captcha challenge, This feature is not available in the opensource (free) version.'
        )

    if self.is_New_IUAM_Challenge(resp):
        self.simpleException(
            CloudflareChallengeError,
            'Detected a Cloudflare version 2 challenge, This feature is not available in the opensource (free) version.'
        )

    # Version-1 Captcha / JS challenges are the solvable cases.
    if self.is_Captcha_Challenge(resp) or self.is_IUAM_Challenge(resp):
        if self.debug:
            print('Detected a Cloudflare version 1 challenge.')
        return True

    return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Try to solve cloudflare javascript challenge.
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def IUAM_Challenge_Response(self, body, url, interpreter):
    """Build the answer payload for a Cloudflare v1 JS (IUAM) challenge.

    Scrapes the challenge form out of *body*, solves the JS puzzle via the
    configured *interpreter*, and returns {'url': submit_url, 'data': payload}
    ready to be POSTed back.  Raises CloudflareIUAMError (via
    simpleException) when the page cannot be parsed or solved.
    """
    try:
        # Pull the whole challenge <form> plus its action URL (which
        # carries the __cf_chl_jschl_tk__ token).
        formPayload = re.search(
            r'<form (?P<form>.*?="challenge-form" '
            r'action="(?P<challengeUUID>.*?'
            r'__cf_chl_jschl_tk__=\S+)"(.*?)</form>)',
            body,
            re.M | re.DOTALL
        ).groupdict()

        if not all(key in formPayload for key in ['form', 'challengeUUID']):
            self.simpleException(
                CloudflareIUAMError,
                "Cloudflare IUAM detected, unfortunately we can't extract the parameters correctly."
            )

        # Collect only the hidden inputs Cloudflare expects to be echoed back.
        payload = OrderedDict()
        for challengeParam in re.findall(r'^\s*<input\s(.*?)/>', formPayload['form'], re.M | re.S):
            inputPayload = dict(re.findall(r'(\S+)="(\S+)"', challengeParam))
            if inputPayload.get('name') in ['r', 'jschl_vc', 'pass']:
                payload.update({inputPayload['name']: inputPayload['value']})

    except AttributeError:
        # re.search returned None -> .groupdict() raised AttributeError.
        self.simpleException(
            CloudflareIUAMError,
            "Cloudflare IUAM detected, unfortunately we can't extract the parameters correctly."
        )

    hostParsed = urlparse(url)

    try:
        # Solve the arithmetic/JS puzzle with the pluggable interpreter.
        payload['jschl_answer'] = JavaScriptInterpreter.dynamicImport(
            interpreter
        ).solveChallenge(body, hostParsed.netloc)
    except Exception as e:
        self.simpleException(
            CloudflareIUAMError,
            "Unable to parse Cloudflare anti-bots page: {}".format(getattr(e, 'message', e))
        )

    # Absolute submit URL: the form action is relative and HTML-escaped.
    return {
        'url': "{}://{}{}".format(hostParsed.scheme, hostParsed.netloc, self.unescape(formPayload['challengeUUID'])),
        'data': payload
    }
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Try to solve the Captcha challenge via 3rd party.
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def captcha_Challenge_Response(self, provider, provider_params, body, url):
    """Solve a Cloudflare v1 Captcha page via a 3rd-party *provider*.

    Scrapes the Captcha form from *body*, submits the sitekey to the
    provider, and returns {'url': submit_url, 'data': payload} to POST
    back.  Raises CloudflareCaptchaError (via simpleException) on parse
    failure.
    """
    try:
        # Pull the challenge <form> plus its action URL (carrying the
        # __cf_chl_captcha_tk__ token).
        formPayload = re.search(
            r'<form (?P<form>.*?="challenge-form" '
            r'action="(?P<challengeUUID>.*?__cf_chl_captcha_tk__=\S+)"(.*?)</form>)',
            body,
            re.M | re.DOTALL
        ).groupdict()

        if not all(key in formPayload for key in ['form', 'challengeUUID']):
            self.simpleException(
                CloudflareCaptchaError,
                "Cloudflare Captcha detected, unfortunately we can't extract the parameters correctly."
            )

        # Grab the hidden-input values Cloudflare expects echoed back, keyed
        # by the literal attribute text they were matched with.
        payload = OrderedDict(
            re.findall(
                r'(name="r"\svalue|data-ray|data-sitekey|name="cf_captcha_kind"\svalue)="(.*?)"',
                formPayload['form']
            )
        )

        # 're' -> Google reCaptcha, anything else -> hCaptcha.
        captchaType = 'reCaptcha' if payload['name="cf_captcha_kind" value'] == 're' else 'hCaptcha'

    except (AttributeError, KeyError):
        self.simpleException(
            CloudflareCaptchaError,
            "Cloudflare Captcha detected, unfortunately we can't extract the parameters correctly."
        )

    # ------------------------------------------------------------------------------- #
    # Pass proxy parameter to provider to solve captcha.
    # ------------------------------------------------------------------------------- #

    if self.proxies and self.proxies != self.captcha.get('proxy'):
        self.captcha['proxy'] = self.proxies

    # ------------------------------------------------------------------------------- #
    # Pass User-Agent if provider supports it to solve captcha.
    # ------------------------------------------------------------------------------- #

    self.captcha['User-Agent'] = self.headers['User-Agent']

    # ------------------------------------------------------------------------------- #
    # Submit job to provider to request captcha solve.
    # ------------------------------------------------------------------------------- #

    captchaResponse = Captcha.dynamicImport(
        provider.lower()
    ).solveCaptcha(
        captchaType,
        url,
        payload['data-sitekey'],
        provider_params
    )

    # ------------------------------------------------------------------------------- #
    # Parse and handle the response of solved captcha.
    # ------------------------------------------------------------------------------- #

    dataPayload = OrderedDict([
        ('r', payload.get('name="r" value', '')),
        ('cf_captcha_kind', payload['name="cf_captcha_kind" value']),
        ('id', payload.get('data-ray')),
        ('g-recaptcha-response', captchaResponse)
    ])

    # hCaptcha wants the answer under its own field as well.
    if captchaType == 'hCaptcha':
        dataPayload.update({'h-captcha-response': captchaResponse})

    hostParsed = urlparse(url)

    # Absolute submit URL: the form action is relative and HTML-escaped.
    return {
        'url': "{}://{}{}".format(hostParsed.scheme, hostParsed.netloc, self.unescape(formPayload['challengeUUID'])),
        'data': dataPayload
    }
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Attempt to handle and send the challenge response back to cloudflare
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def Challenge_Response(self, resp, **kwargs):
    """Handle a detected Cloudflare v1 challenge and submit the answer.

    Captcha challenges are routed to the configured provider; JS (IUAM)
    challenges are solved locally after Cloudflare's mandated delay.  The
    solved payload is POSTed back and redirects are followed manually.
    """
    if self.is_Captcha_Challenge(resp):
        # ------------------------------------------------------------------------------- #
        # double down on the request as some websites are only checking
        # if cfuid is populated before issuing Captcha.
        # ------------------------------------------------------------------------------- #

        if self.doubleDown:
            resp = self.decodeBrotli(
                self.perform_request(resp.request.method, resp.url, **kwargs)
            )

            if not self.is_Captcha_Challenge(resp):
                return resp

        # ------------------------------------------------------------------------------- #
        # if no captcha provider raise a runtime error.
        # ------------------------------------------------------------------------------- #

        if not self.captcha or not isinstance(self.captcha, dict) or not self.captcha.get('provider'):
            self.simpleException(
                CloudflareCaptchaProvider,
                "Cloudflare Captcha detected, unfortunately you haven't loaded an anti Captcha provider "
                "correctly via the 'captcha' parameter."
            )

        # ------------------------------------------------------------------------------- #
        # if provider is return_response, return the response without doing anything.
        # ------------------------------------------------------------------------------- #

        if self.captcha.get('provider') == 'return_response':
            return resp

        # ------------------------------------------------------------------------------- #
        # Submit request to parser wrapper to solve captcha
        # ------------------------------------------------------------------------------- #

        submit_url = self.captcha_Challenge_Response(
            self.captcha.get('provider'),
            self.captcha,
            resp.text,
            resp.url
        )
    else:
        # ------------------------------------------------------------------------------- #
        # Cloudflare requires a delay before solving the challenge
        # ------------------------------------------------------------------------------- #

        if not self.delay:
            try:
                # Scrape the setTimeout() delay (ms) from the challenge page.
                delay = float(
                    re.search(
                        r'submit\(\);\r?\n\s*},\s*([0-9]+)',
                        resp.text
                    ).group(1)
                ) / float(1000)
                if isinstance(delay, (int, float)):
                    self.delay = delay
            except (AttributeError, ValueError):
                self.simpleException(
                    CloudflareIUAMError,
                    "Cloudflare IUAM possibility malformed, issue extracing delay value."
                )

        sleep(self.delay)

        # ------------------------------------------------------------------------------- #

        submit_url = self.IUAM_Challenge_Response(
            resp.text,
            resp.url,
            self.interpreter
        )

    # ------------------------------------------------------------------------------- #
    # Send the Challenge Response back to Cloudflare
    # ------------------------------------------------------------------------------- #

    if submit_url:

        def updateAttr(obj, name, newValue):
            # Merge newValue into obj[name], creating the dict if missing.
            try:
                obj[name].update(newValue)
                return obj[name]
            except (AttributeError, KeyError):
                obj[name] = {}
                obj[name].update(newValue)
                return obj[name]

        # Copy kwargs so the caller's dict is never mutated; disable
        # redirects so we can hand-process the post-solve 3xx ourselves.
        cloudflare_kwargs = deepcopy(kwargs)
        cloudflare_kwargs['allow_redirects'] = False
        cloudflare_kwargs['data'] = updateAttr(
            cloudflare_kwargs,
            'data',
            submit_url['data']
        )

        urlParsed = urlparse(resp.url)
        cloudflare_kwargs['headers'] = updateAttr(
            cloudflare_kwargs,
            'headers',
            {
                'Origin': '{}://{}'.format(urlParsed.scheme, urlParsed.netloc),
                'Referer': resp.url
            }
        )

        challengeSubmitResponse = self.request(
            'POST',
            submit_url['url'],
            **cloudflare_kwargs
        )

        if challengeSubmitResponse.status_code == 400:
            self.simpleException(
                CloudflareSolveError,
                'Invalid challenge answer detected, Cloudflare broken?'
            )

        # ------------------------------------------------------------------------------- #
        # Return response if Cloudflare is doing content pass through instead of 3xx
        # else request with redirect URL also handle protocol scheme change http -> https
        # ------------------------------------------------------------------------------- #

        if not challengeSubmitResponse.is_redirect:
            return challengeSubmitResponse

        else:
            cloudflare_kwargs = deepcopy(kwargs)
            cloudflare_kwargs['headers'] = updateAttr(
                cloudflare_kwargs,
                'headers',
                {'Referer': challengeSubmitResponse.url}
            )

            # Resolve a relative Location header against the submit URL.
            if not urlparse(challengeSubmitResponse.headers['Location']).netloc:
                redirect_location = urljoin(
                    challengeSubmitResponse.url,
                    challengeSubmitResponse.headers['Location']
                )
            else:
                redirect_location = challengeSubmitResponse.headers['Location']

            return self.request(
                resp.request.method,
                redirect_location,
                **cloudflare_kwargs
            )

    # ------------------------------------------------------------------------------- #
    # We shouldn't be here...
    # Re-request the original query and/or process again....
    # ------------------------------------------------------------------------------- #

    return self.request(resp.request.method, resp.url, **kwargs)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@classmethod
def create_scraper(cls, sess=None, **kwargs):
    """Convenience factory for a ready-to-go CloudScraper.

    When *sess* (an existing requests session) is given, its auth, certs,
    cookies, headers, hooks, params, proxies and data are copied over.
    """
    scraper = cls(**kwargs)

    if sess:
        for attr in ('auth', 'cert', 'cookies', 'headers', 'hooks', 'params', 'proxies', 'data'):
            session_value = getattr(sess, attr, None)
            # Only overwrite with truthy values so scraper defaults survive.
            if session_value:
                setattr(scraper, attr, session_value)

    return scraper
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
# Functions for integrating cloudscraper with other applications and scripts
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@classmethod
def get_tokens(cls, url, **kwargs):
    """Fetch Cloudflare clearance cookies for *url*.

    Returns a tuple of ({'__cfduid': ..., 'cf_clearance': ...}, user_agent).
    Raises on HTTP errors, and CloudflareIUAMError when no Cloudflare
    cookies are found on the resulting session.
    """
    scraper = cls.create_scraper(
        **{
            field: kwargs.pop(field, None) for field in [
                'allow_brotli',
                'browser',
                'debug',
                'delay',
                'doubleDown',
                'captcha',
                'interpreter',
                # BUG FIX: a missing comma previously fused 'source_address'
                # and 'requestPreHook' into one bogus field name, so neither
                # option was ever forwarded to the scraper.
                'source_address',
                'requestPreHook',
                'requestPostHook'
            ] if field in kwargs
        }
    )

    try:
        resp = scraper.get(url, **kwargs)
        resp.raise_for_status()
    except Exception:
        logging.error('"{}" returned an error. Could not collect tokens.'.format(url))
        raise

    domain = urlparse(resp.url).netloc
    # noinspection PyUnusedLocal
    cookie_domain = None

    for d in scraper.cookies.list_domains():
        # BUG FIX: the original used `d in ('.{}'.format(domain))`, a
        # substring test (e.g. '.com' matched '.example.com'); compare the
        # exact leading-dot domain instead.
        if d.startswith('.') and d == '.{}'.format(domain):
            cookie_domain = d
            break
    else:
        # BUG FIX: simpleException is an instance method; calling it via
        # `cls.` raised TypeError instead of the intended error.
        scraper.simpleException(
            CloudflareIUAMError,
            "Unable to find Cloudflare cookies. Does the site actually "
            "have Cloudflare IUAM (I'm Under Attack Mode) enabled?"
        )

    return (
        {
            '__cfduid': scraper.cookies.get('__cfduid', '', domain=cookie_domain),
            'cf_clearance': scraper.cookies.get('cf_clearance', '', domain=cookie_domain)
        },
        scraper.headers['User-Agent']
    )
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@classmethod
def get_cookie_string(cls, url, **kwargs):
    """Build a raw 'Cookie:' header value for *url*.

    Returns (cookie_header_string, user_agent).
    """
    tokens, user_agent = cls.get_tokens(url, **kwargs)
    cookie_header = '; '.join('='.join(pair) for pair in tokens.items())
    return cookie_header, user_agent
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
# Warn at import time when the linked OpenSSL predates 1.1.1: without
# TLS 1.3, Cloudflare may serve unexpected Captchas or 1020 blocks.
if ssl.OPENSSL_VERSION_INFO < (1, 1, 1):
    print(
        "DEPRECATION: The OpenSSL being used by this python install ({}) does not meet the minimum supported "
        "version (>= OpenSSL 1.1.1) in order to support TLS 1.3 required by Cloudflare, "
        "You may encounter an unexpected Captcha or cloudflare 1020 blocks.".format(ssl.OPENSSL_VERSION)
    )

# ------------------------------------------------------------------------------- #

# Module-level convenience aliases for the common entry points.
create_scraper = CloudScraper.create_scraper
get_tokens = CloudScraper.get_tokens
get_cookie_string = CloudScraper.get_cookie_string
|
||||
260
lib/cloudscraper/captcha/2captcha.py
Normal file
260
lib/cloudscraper/captcha/2captcha.py
Normal file
@@ -0,0 +1,260 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import requests
|
||||
try:
|
||||
from urlparse import urlparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from ..exceptions import (
|
||||
CaptchaServiceUnavailable,
|
||||
CaptchaAPIError,
|
||||
CaptchaTimeout,
|
||||
CaptchaParameter,
|
||||
CaptchaBadJobID,
|
||||
CaptchaReportError
|
||||
)
|
||||
|
||||
try:
|
||||
import polling2
|
||||
except ImportError:
|
||||
raise ImportError("Please install the python module 'polling2' via pip")
|
||||
|
||||
from . import Captcha
|
||||
|
||||
|
||||
class captchaSolver(Captcha):
|
||||
|
||||
def __init__(self):
    """Register this solver under the '2captcha' provider name and open a
    dedicated HTTP session against the 2captcha API host."""
    super(captchaSolver, self).__init__('2captcha')
    self.host = 'https://2captcha.com'
    self.session = requests.Session()
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
def checkErrorStatus(response, request_type):
    """Raise a descriptive exception when a 2captcha API *response* signals
    an error.

    *request_type* selects the error table ('in.php' for job submission,
    'res.php' for result polling).  Returns None when no error is present.
    """
    # 2captcha server-side failures surface as plain 5xx responses.
    if response.status_code in [500, 502]:
        raise CaptchaServiceUnavailable(f'2Captcha: Server Side Error {response.status_code}')

    # Error-code -> human-readable explanation, per API endpoint.
    errors = {
        'in.php': {
            "ERROR_WRONG_USER_KEY": "You've provided api_key parameter value is in incorrect format, it should contain 32 symbols.",
            "ERROR_KEY_DOES_NOT_EXIST": "The api_key you've provided does not exists.",
            "ERROR_ZERO_BALANCE": "You don't have sufficient funds on your account.",
            "ERROR_PAGEURL": "pageurl parameter is missing in your request.",
            "ERROR_NO_SLOT_AVAILABLE":
                "No Slots Available.\nYou can receive this error in two cases:\n"
                "1. If you solve ReCaptcha: the queue of your captchas that are not distributed to workers is too long. "
                "Queue limit changes dynamically and depends on total amount of captchas awaiting solution and usually it's between 50 and 100 captchas.\n"
                "2. If you solve Normal Captcha: your maximum rate for normal captchas is lower than current rate on the server."
                "You can change your maximum rate in your account's settings.",
            "ERROR_IP_NOT_ALLOWED": "The request is sent from the IP that is not on the list of your allowed IPs.",
            "IP_BANNED": "Your IP address is banned due to many frequent attempts to access the server using wrong authorization keys.",
            "ERROR_BAD_TOKEN_OR_PAGEURL":
                "You can get this error code when sending ReCaptcha V2. "
                "That happens if your request contains invalid pair of googlekey and pageurl. "
                "The common reason for that is that ReCaptcha is loaded inside an iframe hosted on another domain/subdomain.",
            "ERROR_GOOGLEKEY":
                "You can get this error code when sending ReCaptcha V2. "
                "That means that sitekey value provided in your request is incorrect: it's blank or malformed.",
            "MAX_USER_TURN": "You made more than 60 requests within 3 seconds.Your account is banned for 10 seconds. Ban will be lifted automatically."
        },
        'res.php': {
            "ERROR_CAPTCHA_UNSOLVABLE":
                "We are unable to solve your captcha - three of our workers were unable solve it "
                "or we didn't get an answer within 90 seconds (300 seconds for ReCaptcha V2). "
                "We will not charge you for that request.",
            "ERROR_WRONG_USER_KEY": "You've provided api_key parameter value in incorrect format, it should contain 32 symbols.",
            "ERROR_KEY_DOES_NOT_EXIST": "The api_key you've provided does not exists.",
            "ERROR_WRONG_ID_FORMAT": "You've provided captcha ID in wrong format. The ID can contain numbers only.",
            "ERROR_WRONG_CAPTCHA_ID": "You've provided incorrect captcha ID.",
            "ERROR_BAD_DUPLICATES":
                "Error is returned when 100% accuracy feature is enabled. "
                "The error means that max numbers of tries is reached but min number of matches not found.",
            "REPORT_NOT_RECORDED": "Error is returned to your complain request if you already complained lots of correctly solved captchas.",
            "ERROR_IP_ADDRES":
                "You can receive this error code when registering a pingback (callback) IP or domain."
                "That happes if your request is coming from an IP address that doesn't match the IP address of your pingback IP or domain.",
            "ERROR_TOKEN_EXPIRED": "You can receive this error code when sending GeeTest. That error means that challenge value you provided is expired.",
            "ERROR_EMPTY_ACTION": "Action parameter is missing or no value is provided for action parameter."
        }
    }

    # status == 0 marks an API-level failure; raise with the explanation
    # from the table above when the code is a known one.
    rPayload = response.json()
    if rPayload.get('status') == 0 and rPayload.get('request') in errors.get(request_type):
        raise CaptchaAPIError(
            f"{rPayload['request']} {errors.get(request_type).get(rPayload['request'])}"
        )
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def reportJob(self, jobID):
    """Report a solved Captcha job as bad to 2captcha.

    Polls res.php with action=reportbad until acknowledged.  Returns True
    on success; raises CaptchaBadJobID / CaptchaReportError otherwise.
    """
    if not jobID:
        raise CaptchaBadJobID(
            "2Captcha: Error bad job id to request Captcha."
        )

    def _checkRequest(response):
        # Surface API errors, then accept only an OK + status==1 payload.
        self.checkErrorStatus(response, 'res.php')
        if response.ok and response.json().get('status') == 1:
            return response
        return None

    response = polling2.poll(
        lambda: self.session.get(
            '{}/res.php'.format(self.host),
            params={
                'key': self.api_key,
                'action': 'reportbad',
                'id': jobID,
                'json': '1'
            },
            timeout=30
        ),
        check_success=_checkRequest,
        step=5,
        timeout=180
    )

    if not response:
        raise CaptchaReportError(
            "2Captcha: Error - Failed to report bad Captcha solve."
        )
    return True
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestJob(self, jobID):
    """Poll 2captcha for the solution of job *jobID*.

    Returns the solved token string; raises CaptchaBadJobID for a falsy
    job id and CaptchaTimeout when no answer arrives in time.
    """
    if not jobID:
        raise CaptchaBadJobID("2Captcha: Error bad job id to request Captcha.")

    def _checkRequest(response):
        # Surface API errors, then accept only an OK + status==1 payload.
        self.checkErrorStatus(response, 'res.php')
        if response.ok and response.json().get('status') == 1:
            return response
        return None

    response = polling2.poll(
        lambda: self.session.get(
            '{}/res.php'.format(self.host),
            params={
                'key': self.api_key,
                'action': 'get',
                'id': jobID,
                'json': '1'
            },
            timeout=30
        ),
        check_success=_checkRequest,
        step=5,
        timeout=180
    )

    if not response:
        raise CaptchaTimeout(
            "2Captcha: Error failed to solve Captcha."
        )
    return response.json().get('request')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestSolve(self, captchaType, url, siteKey):
    """Submit a new Captcha job to 2captcha's in.php endpoint.

    Returns the job id; raises CaptchaBadJobID when none is returned.
    """
    def _checkRequest(response):
        # Accept only an OK response that carries status==1 and a job id.
        self.checkErrorStatus(response, 'in.php')
        if response.ok and response.json().get("status") == 1 and response.json().get('request'):
            return response
        return None

    data = {
        'key': self.api_key,
        'pageurl': url,
        'json': 1,
        'soft_id': 2905
    }

    # reCaptcha and hCaptcha use different method/sitekey field names.
    if captchaType == 'reCaptcha':
        data['method'] = 'userrcaptcha'
        data['googlekey'] = siteKey
    else:
        data['method'] = 'hcaptcha'
        data['sitekey'] = siteKey

    # Forward our proxy so the worker solves from a matching IP.
    if self.proxy:
        data['proxy'] = self.proxy
        data['proxytype'] = self.proxyType

    response = polling2.poll(
        lambda: self.session.post(
            '{}/in.php'.format(self.host),
            data=data,
            allow_redirects=False,
            timeout=30
        ),
        check_success=_checkRequest,
        step=5,
        timeout=180
    )

    if not response:
        raise CaptchaBadJobID(
            '2Captcha: Error no job id was returned.'
        )
    return response.json().get('request')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
|
||||
jobID = None
|
||||
|
||||
if not captchaParams.get('api_key'):
|
||||
raise CaptchaParameter(
|
||||
"2Captcha: Missing api_key parameter."
|
||||
)
|
||||
|
||||
self.api_key = captchaParams.get('api_key')
|
||||
|
||||
if captchaParams.get('proxy') and not captchaParams.get('no_proxy'):
|
||||
hostParsed = urlparse(captchaParams.get('proxy', {}).get('https'))
|
||||
|
||||
if not hostParsed.scheme:
|
||||
raise CaptchaParameter('Cannot parse proxy correctly, bad scheme')
|
||||
|
||||
if not hostParsed.netloc:
|
||||
raise CaptchaParameter('Cannot parse proxy correctly, bad netloc')
|
||||
|
||||
self.proxyType = hostParsed.scheme
|
||||
self.proxy = hostParsed.netloc
|
||||
else:
|
||||
self.proxy = None
|
||||
|
||||
try:
|
||||
jobID = self.requestSolve(captchaType, url, siteKey)
|
||||
return self.requestJob(jobID)
|
||||
except polling2.TimeoutException:
|
||||
try:
|
||||
if jobID:
|
||||
self.reportJob(jobID)
|
||||
except polling2.TimeoutException:
|
||||
raise CaptchaTimeout(
|
||||
f"2Captcha: Captcha solve took to long and also failed reporting the job the job id {jobID}."
|
||||
)
|
||||
|
||||
raise CaptchaTimeout(
|
||||
f"2Captcha: Captcha solve took to long to execute job id {jobID}, aborting."
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
captchaSolver()
|
||||
212
lib/cloudscraper/captcha/9kw.py
Normal file
212
lib/cloudscraper/captcha/9kw.py
Normal file
@@ -0,0 +1,212 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import re
|
||||
import requests
|
||||
|
||||
try:
|
||||
import polling
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Please install the python module 'polling' via pip or download it from "
|
||||
"https://github.com/justiniso/polling/"
|
||||
)
|
||||
|
||||
from ..exceptions import (
|
||||
reCaptchaServiceUnavailable,
|
||||
reCaptchaAPIError,
|
||||
reCaptchaTimeout,
|
||||
reCaptchaParameter,
|
||||
reCaptchaBadJobID
|
||||
)
|
||||
|
||||
from . import reCaptcha
|
||||
|
||||
|
||||
class captchaSolver(reCaptcha):
|
||||
|
||||
def __init__(self):
|
||||
super(captchaSolver, self).__init__('9kw')
|
||||
self.host = 'https://www.9kw.eu/index.cgi'
|
||||
self.maxtimeout = 180
|
||||
self.session = requests.Session()
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def checkErrorStatus(response):
|
||||
if response.status_code in [500, 502]:
|
||||
raise reCaptchaServiceUnavailable(
|
||||
'9kw: Server Side Error {}'.format(response.status_code)
|
||||
)
|
||||
|
||||
error_codes = {
|
||||
1: 'No API Key available.',
|
||||
2: 'No API key found.',
|
||||
3: 'No active API key found.',
|
||||
4: 'API Key has been disabled by the operator. ',
|
||||
5: 'No user found.',
|
||||
6: 'No data found.',
|
||||
7: 'Found No ID.',
|
||||
8: 'found No captcha.',
|
||||
9: 'No image found.',
|
||||
10: 'Image size not allowed.',
|
||||
11: 'credit is not sufficient.',
|
||||
12: 'what was done.',
|
||||
13: 'No answer contain.',
|
||||
14: 'Captcha already been answered.',
|
||||
15: 'Captcha to quickly filed.',
|
||||
16: 'JD check active.',
|
||||
17: 'Unknown problem.',
|
||||
18: 'Found No ID.',
|
||||
19: 'Incorrect answer.',
|
||||
20: 'Do not timely filed (Incorrect UserID).',
|
||||
21: 'Link not allowed.',
|
||||
22: 'Prohibited submit.',
|
||||
23: 'Entering prohibited.',
|
||||
24: 'Too little credit.',
|
||||
25: 'No entry found.',
|
||||
26: 'No Conditions accepted.',
|
||||
27: 'No coupon code found in the database.',
|
||||
28: 'Already unused voucher code.',
|
||||
29: 'maxTimeout under 60 seconds.',
|
||||
30: 'User not found.',
|
||||
31: 'An account is not yet 24 hours in system.',
|
||||
32: 'An account does not have the full rights.',
|
||||
33: 'Plugin needed a update.',
|
||||
34: 'No HTTPS allowed.',
|
||||
35: 'No HTTP allowed.',
|
||||
36: 'Source not allowed.',
|
||||
37: 'Transfer denied.',
|
||||
38: 'Incorrect answer without space',
|
||||
39: 'Incorrect answer with space',
|
||||
40: 'Incorrect answer with not only numbers',
|
||||
41: 'Incorrect answer with not only A-Z, a-z',
|
||||
42: 'Incorrect answer with not only 0-9, A-Z, a-z',
|
||||
43: 'Incorrect answer with not only [0-9,- ]',
|
||||
44: 'Incorrect answer with not only [0-9A-Za-z,- ]',
|
||||
45: 'Incorrect answer with not only coordinates',
|
||||
46: 'Incorrect answer with not only multiple coordinates',
|
||||
47: 'Incorrect answer with not only data',
|
||||
48: 'Incorrect answer with not only rotate number',
|
||||
49: 'Incorrect answer with not only text',
|
||||
50: 'Incorrect answer with not only text and too short',
|
||||
51: 'Incorrect answer with not enough chars',
|
||||
52: 'Incorrect answer with too many chars',
|
||||
53: 'Incorrect answer without no or yes',
|
||||
54: 'Assignment was not found.'
|
||||
}
|
||||
|
||||
if response.text.startswith('{'):
|
||||
if response.json().get('error'):
|
||||
raise reCaptchaAPIError(error_codes.get(int(response.json().get('error'))))
|
||||
else:
|
||||
error_code = int(re.search(r'^00(?P<error_code>\d+)', response.text).groupdict().get('error_code', 0))
|
||||
if error_code:
|
||||
raise reCaptchaAPIError(error_codes.get(error_code))
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestJob(self, jobID):
|
||||
if not jobID:
|
||||
raise reCaptchaBadJobID(
|
||||
"9kw: Error bad job id to request reCaptcha against."
|
||||
)
|
||||
|
||||
def _checkRequest(response):
|
||||
if response.ok and response.json().get('answer') != 'NO DATA':
|
||||
return response
|
||||
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
return None
|
||||
|
||||
response = polling.poll(
|
||||
lambda: self.session.get(
|
||||
self.host,
|
||||
params={
|
||||
'apikey': self.api_key,
|
||||
'action': 'usercaptchacorrectdata',
|
||||
'id': jobID,
|
||||
'info': 1,
|
||||
'json': 1
|
||||
}
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=10,
|
||||
timeout=(self.maxtimeout + 10)
|
||||
)
|
||||
|
||||
if response:
|
||||
return response.json().get('answer')
|
||||
else:
|
||||
raise reCaptchaTimeout("9kw: Error failed to solve reCaptcha.")
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestSolve(self, captchaType, url, siteKey):
|
||||
def _checkRequest(response):
|
||||
if response.ok and response.text.startswith('{') and response.json().get('captchaid'):
|
||||
return response
|
||||
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
return None
|
||||
|
||||
captchaMap = {
|
||||
'reCaptcha': 'recaptchav2',
|
||||
'hCaptcha': 'hcaptcha'
|
||||
}
|
||||
|
||||
response = polling.poll(
|
||||
lambda: self.session.post(
|
||||
self.host,
|
||||
data={
|
||||
'apikey': self.api_key,
|
||||
'action': 'usercaptchaupload',
|
||||
'interactive': 1,
|
||||
'file-upload-01': siteKey,
|
||||
'oldsource': captchaMap[captchaType],
|
||||
'pageurl': url,
|
||||
'maxtimeout': self.maxtimeout,
|
||||
'json': 1
|
||||
},
|
||||
allow_redirects=False
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=5,
|
||||
timeout=(self.maxtimeout + 10)
|
||||
)
|
||||
|
||||
if response:
|
||||
return response.json().get('captchaid')
|
||||
else:
|
||||
raise reCaptchaBadJobID('9kw: Error no valid job id was returned.')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
|
||||
jobID = None
|
||||
|
||||
if not reCaptchaParams.get('api_key'):
|
||||
raise reCaptchaParameter("9kw: Missing api_key parameter.")
|
||||
|
||||
self.api_key = reCaptchaParams.get('api_key')
|
||||
|
||||
if reCaptchaParams.get('maxtimeout'):
|
||||
self.maxtimeout = reCaptchaParams.get('maxtimeout')
|
||||
|
||||
if reCaptchaParams.get('proxy'):
|
||||
self.session.proxies = reCaptchaParams.get('proxies')
|
||||
|
||||
try:
|
||||
jobID = self.requestSolve(captchaType, url, siteKey)
|
||||
return self.requestJob(jobID)
|
||||
except polling.TimeoutException:
|
||||
raise reCaptchaTimeout(
|
||||
f"9kw: reCaptcha solve took to long to execute 'captchaid' {jobID}, aborting."
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
captchaSolver()
|
||||
47
lib/cloudscraper/captcha/__init__.py
Normal file
47
lib/cloudscraper/captcha/__init__.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import abc
|
||||
import logging
|
||||
import sys
|
||||
|
||||
if sys.version_info >= (3, 4):
|
||||
ABC = abc.ABC # noqa
|
||||
else:
|
||||
ABC = abc.ABCMeta('ABC', (), {})
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
captchaSolvers = {}
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class Captcha(ABC):
|
||||
@abc.abstractmethod
|
||||
def __init__(self, name):
|
||||
captchaSolvers[name] = self
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@classmethod
|
||||
def dynamicImport(cls, name):
|
||||
if name not in captchaSolvers:
|
||||
try:
|
||||
__import__('{}.{}'.format(cls.__module__, name))
|
||||
if not isinstance(captchaSolvers.get(name), Captcha):
|
||||
raise ImportError('The anti captcha provider was not initialized.')
|
||||
except ImportError as e:
|
||||
sys.tracebacklimit = 0
|
||||
logging.error('Unable to load {} anti captcha provider -> {}'.format(name, e))
|
||||
raise
|
||||
|
||||
return captchaSolvers[name]
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@abc.abstractmethod
|
||||
def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
|
||||
pass
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def solveCaptcha(self, captchaType, url, siteKey, captchaParams):
|
||||
return self.getCaptchaAnswer(captchaType, url, siteKey, captchaParams)
|
||||
109
lib/cloudscraper/captcha/anticaptcha.py
Normal file
109
lib/cloudscraper/captcha/anticaptcha.py
Normal file
@@ -0,0 +1,109 @@
|
||||
from __future__ import absolute_import
|
||||
from ..exceptions import (
|
||||
CaptchaParameter,
|
||||
CaptchaTimeout,
|
||||
CaptchaAPIError
|
||||
)
|
||||
|
||||
try:
|
||||
from urlparse import urlparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse
|
||||
|
||||
try:
|
||||
from python_anticaptcha import (
|
||||
AnticaptchaClient,
|
||||
NoCaptchaTaskProxylessTask,
|
||||
HCaptchaTaskProxyless,
|
||||
NoCaptchaTask,
|
||||
HCaptchaTask,
|
||||
AnticaptchaException
|
||||
)
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"Please install/upgrade the python module 'python_anticaptcha' via "
|
||||
"pip install python-anticaptcha or https://github.com/ad-m/python-anticaptcha/"
|
||||
)
|
||||
|
||||
import sys
|
||||
|
||||
from . import Captcha
|
||||
|
||||
|
||||
class captchaSolver(Captcha):
|
||||
|
||||
def __init__(self):
|
||||
if sys.modules['python_anticaptcha'].__version__ < '0.6':
|
||||
raise ImportError(
|
||||
"Please upgrade the python module 'python_anticaptcha' via "
|
||||
"pip install -U python-anticaptcha or https://github.com/ad-m/python-anticaptcha/"
|
||||
)
|
||||
super(captchaSolver, self).__init__('anticaptcha')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def parseProxy(self, url, user_agent):
|
||||
parsed = urlparse(url)
|
||||
|
||||
return dict(
|
||||
proxy_type=parsed.scheme,
|
||||
proxy_address=parsed.hostname,
|
||||
proxy_port=parsed.port,
|
||||
proxy_login=parsed.username,
|
||||
proxy_password=parsed.password,
|
||||
user_agent=user_agent
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
|
||||
if not captchaParams.get('api_key'):
|
||||
raise CaptchaParameter("anticaptcha: Missing api_key parameter.")
|
||||
|
||||
client = AnticaptchaClient(captchaParams.get('api_key'))
|
||||
|
||||
if captchaParams.get('proxy') and not captchaParams.get('no_proxy'):
|
||||
captchaMap = {
|
||||
'reCaptcha': NoCaptchaTask,
|
||||
'hCaptcha': HCaptchaTask
|
||||
}
|
||||
|
||||
proxy = self.parseProxy(
|
||||
captchaParams.get('proxy', {}).get('https'),
|
||||
captchaParams.get('User-Agent', '')
|
||||
)
|
||||
|
||||
task = captchaMap[captchaType](
|
||||
url,
|
||||
siteKey,
|
||||
**proxy
|
||||
)
|
||||
else:
|
||||
captchaMap = {
|
||||
'reCaptcha': NoCaptchaTaskProxylessTask,
|
||||
'hCaptcha': HCaptchaTaskProxyless
|
||||
}
|
||||
task = captchaMap[captchaType](url, siteKey)
|
||||
|
||||
if not hasattr(client, 'createTaskSmee'):
|
||||
raise NotImplementedError(
|
||||
"Please upgrade 'python_anticaptcha' via pip or download it from "
|
||||
"https://github.com/ad-m/python-anticaptcha/tree/hcaptcha"
|
||||
)
|
||||
|
||||
job = client.createTaskSmee(task, timeout=180)
|
||||
|
||||
try:
|
||||
job.join(maximum_time=180)
|
||||
except (AnticaptchaException) as e:
|
||||
raise CaptchaTimeout(f"{getattr(e, 'message', e)}")
|
||||
|
||||
if 'solution' in job._last_result:
|
||||
return job.get_solution_response()
|
||||
else:
|
||||
raise CaptchaAPIError('Job did not return `solution` key in payload.')
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
captchaSolver()
|
||||
190
lib/cloudscraper/captcha/capmonster.py
Normal file
190
lib/cloudscraper/captcha/capmonster.py
Normal file
@@ -0,0 +1,190 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import requests
|
||||
|
||||
try:
|
||||
from urlparse import urlparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from ..exceptions import (
|
||||
CaptchaServiceUnavailable,
|
||||
CaptchaAPIError,
|
||||
CaptchaTimeout,
|
||||
CaptchaParameter,
|
||||
CaptchaBadJobID
|
||||
)
|
||||
|
||||
try:
|
||||
import polling2
|
||||
except ImportError:
|
||||
raise ImportError("Please install the python module 'polling2' via pip")
|
||||
|
||||
from . import Captcha
|
||||
|
||||
|
||||
class captchaSolver(Captcha):
|
||||
|
||||
def __init__(self):
|
||||
super(captchaSolver, self).__init__('capmonster')
|
||||
self.host = 'https://api.capmonster.cloud'
|
||||
self.session = requests.Session()
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def checkErrorStatus(response):
|
||||
if response.status_code in [500, 502]:
|
||||
raise CaptchaServiceUnavailable(
|
||||
'CapMonster: Server Side Error {}'.format(response.status_code)
|
||||
)
|
||||
|
||||
payload = response.json()
|
||||
if payload['errorId'] == 1:
|
||||
if 'errorDescription' in payload:
|
||||
raise CaptchaAPIError(
|
||||
payload['errorDescription']
|
||||
)
|
||||
else:
|
||||
raise CaptchaAPIError(payload['errorCode'])
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestJob(self, taskID):
|
||||
if not taskID:
|
||||
raise CaptchaBadJobID(
|
||||
'CapMonster: Error bad task id to request Captcha.'
|
||||
)
|
||||
|
||||
def _checkRequest(response):
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
if response.ok and response.json()['status'] == 'ready':
|
||||
return True
|
||||
|
||||
return None
|
||||
|
||||
response = polling2.poll(
|
||||
lambda: self.session.post(
|
||||
'{}/getTaskResult'.format(self.host),
|
||||
json={
|
||||
'clientKey': self.clientKey,
|
||||
'taskId': taskID
|
||||
},
|
||||
timeout=30
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=5,
|
||||
timeout=180
|
||||
)
|
||||
|
||||
if response:
|
||||
return response.json()['solution']['gRecaptchaResponse']
|
||||
else:
|
||||
raise CaptchaTimeout(
|
||||
"CapMonster: Error failed to solve Captcha."
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestSolve(self, captchaType, url, siteKey):
|
||||
def _checkRequest(response):
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
if response.ok and response.json()['taskId']:
|
||||
return True
|
||||
|
||||
return None
|
||||
|
||||
data = {
|
||||
'clientKey': self.clientKey,
|
||||
'task': {
|
||||
'websiteURL': url,
|
||||
'websiteKey': siteKey,
|
||||
'softId': 37,
|
||||
'type': 'NoCaptchaTask' if captchaType == 'reCaptcha' else 'HCaptchaTask'
|
||||
}
|
||||
}
|
||||
|
||||
if self.proxy:
|
||||
data['task'].update(self.proxy)
|
||||
else:
|
||||
data['task']['type'] = f"{data['task']['type']}Proxyless"
|
||||
|
||||
response = polling2.poll(
|
||||
lambda: self.session.post(
|
||||
'{}/createTask'.format(self.host),
|
||||
json=data,
|
||||
allow_redirects=False,
|
||||
timeout=30
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=5,
|
||||
timeout=180
|
||||
)
|
||||
|
||||
if response:
|
||||
return response.json()['taskId']
|
||||
else:
|
||||
raise CaptchaBadJobID(
|
||||
'CapMonster: Error no task id was returned.'
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
|
||||
taskID = None
|
||||
|
||||
if not captchaParams.get('clientKey'):
|
||||
raise CaptchaParameter(
|
||||
"CapMonster: Missing clientKey parameter."
|
||||
)
|
||||
|
||||
self.clientKey = captchaParams.get('clientKey')
|
||||
|
||||
if captchaParams.get('proxy') and not captchaParams.get('no_proxy'):
|
||||
hostParsed = urlparse(captchaParams.get('proxy', {}).get('https'))
|
||||
|
||||
if not hostParsed.scheme:
|
||||
raise CaptchaParameter('Cannot parse proxy correctly, bad scheme')
|
||||
|
||||
if not hostParsed.netloc:
|
||||
raise CaptchaParameter('Cannot parse proxy correctly, bad netloc')
|
||||
|
||||
ports = {
|
||||
'http': 80,
|
||||
'https': 443
|
||||
}
|
||||
|
||||
self.proxy = {
|
||||
'proxyType': hostParsed.scheme,
|
||||
'proxyAddress': hostParsed.hostname,
|
||||
'proxyPort': hostParsed.port if hostParsed.port else ports[self.proxy['proxyType']],
|
||||
'proxyLogin': hostParsed.username,
|
||||
'proxyPassword': hostParsed.password,
|
||||
}
|
||||
else:
|
||||
self.proxy = None
|
||||
|
||||
try:
|
||||
taskID = self.requestSolve(captchaType, url, siteKey)
|
||||
return self.requestJob(taskID)
|
||||
except polling2.TimeoutException:
|
||||
try:
|
||||
if taskID:
|
||||
self.reportJob(taskID)
|
||||
except polling2.TimeoutException:
|
||||
raise CaptchaTimeout(
|
||||
"CapMonster: Captcha solve took to long and also failed "
|
||||
f"reporting the task with task id {taskID}."
|
||||
)
|
||||
|
||||
raise CaptchaTimeout(
|
||||
"CapMonster: Captcha solve took to long to execute "
|
||||
f"task id {taskID}, aborting."
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
captchaSolver()
|
||||
268
lib/cloudscraper/captcha/deathbycaptcha.py
Normal file
268
lib/cloudscraper/captcha/deathbycaptcha.py
Normal file
@@ -0,0 +1,268 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import json
|
||||
import requests
|
||||
try:
|
||||
from urlparse import urlparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse
|
||||
|
||||
try:
|
||||
import polling2
|
||||
except ImportError:
|
||||
raise ImportError("Please install the python module 'polling2' via pip")
|
||||
|
||||
from ..exceptions import (
|
||||
CaptchaServiceUnavailable,
|
||||
CaptchaTimeout,
|
||||
CaptchaParameter,
|
||||
CaptchaBadJobID,
|
||||
CaptchaReportError
|
||||
)
|
||||
|
||||
from . import Captcha
|
||||
|
||||
|
||||
class captchaSolver(Captcha):
|
||||
|
||||
def __init__(self):
|
||||
super(captchaSolver, self).__init__('deathbycaptcha')
|
||||
self.host = 'http://api.dbcapi.me/api'
|
||||
self.session = requests.Session()
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@staticmethod
|
||||
def checkErrorStatus(response):
|
||||
errors = dict(
|
||||
[
|
||||
(400, "DeathByCaptcha: 400 Bad Request"),
|
||||
(403, "DeathByCaptcha: 403 Forbidden - Invalid credentails or insufficient credits."),
|
||||
# (500, "DeathByCaptcha: 500 Internal Server Error."),
|
||||
(503, "DeathByCaptcha: 503 Service Temporarily Unavailable.")
|
||||
]
|
||||
)
|
||||
|
||||
if response.status_code in errors:
|
||||
raise CaptchaServiceUnavailable(errors.get(response.status_code))
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def login(self, username, password):
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def _checkRequest(response):
|
||||
if response.ok:
|
||||
if response.json().get('is_banned'):
|
||||
raise CaptchaServiceUnavailable('DeathByCaptcha: Your account is banned.')
|
||||
|
||||
if response.json().get('balanace') == 0:
|
||||
raise CaptchaServiceUnavailable('DeathByCaptcha: insufficient credits.')
|
||||
|
||||
return response
|
||||
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
return None
|
||||
|
||||
response = polling2.poll(
|
||||
lambda: self.session.post(
|
||||
'{}/user'.format(self.host),
|
||||
headers={'Accept': 'application/json'},
|
||||
data={
|
||||
'username': self.username,
|
||||
'password': self.password
|
||||
}
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=10,
|
||||
timeout=120
|
||||
)
|
||||
|
||||
self.debugRequest(response)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def reportJob(self, jobID):
|
||||
if not jobID:
|
||||
raise CaptchaBadJobID(
|
||||
"DeathByCaptcha: Error bad job id to report failed reCaptcha."
|
||||
)
|
||||
|
||||
def _checkRequest(response):
|
||||
if response.status_code == 200:
|
||||
return response
|
||||
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
return None
|
||||
|
||||
response = polling2.poll(
|
||||
lambda: self.session.post(
|
||||
'{}/captcha/{}/report'.format(self.host, jobID),
|
||||
headers={'Accept': 'application/json'},
|
||||
data={
|
||||
'username': self.username,
|
||||
'password': self.password
|
||||
}
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=10,
|
||||
timeout=180
|
||||
)
|
||||
|
||||
if response:
|
||||
return True
|
||||
else:
|
||||
raise CaptchaReportError(
|
||||
"DeathByCaptcha: Error report failed reCaptcha."
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestJob(self, jobID):
|
||||
if not jobID:
|
||||
raise CaptchaBadJobID(
|
||||
"DeathByCaptcha: Error bad job id to request reCaptcha."
|
||||
)
|
||||
|
||||
def _checkRequest(response):
|
||||
if response.ok and response.json().get('text'):
|
||||
return response
|
||||
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
return None
|
||||
|
||||
response = polling2.poll(
|
||||
lambda: self.session.get(
|
||||
'{}/captcha/{}'.format(self.host, jobID),
|
||||
headers={'Accept': 'application/json'}
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=10,
|
||||
timeout=180
|
||||
)
|
||||
|
||||
if response:
|
||||
return response.json().get('text')
|
||||
else:
|
||||
raise CaptchaTimeout(
|
||||
"DeathByCaptcha: Error failed to solve reCaptcha."
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def requestSolve(self, captchaType, url, siteKey):
|
||||
def _checkRequest(response):
|
||||
if response.ok and response.json().get("is_correct") and response.json().get('captcha'):
|
||||
return response
|
||||
|
||||
self.checkErrorStatus(response)
|
||||
|
||||
return None
|
||||
|
||||
data = {
|
||||
'username': self.username,
|
||||
'password': self.password,
|
||||
}
|
||||
|
||||
if captchaType == 'reCaptcha':
|
||||
jPayload = {
|
||||
'googlekey': siteKey,
|
||||
'pageurl': url
|
||||
}
|
||||
|
||||
if self.proxy:
|
||||
jPayload.update({
|
||||
'proxy': self.proxy,
|
||||
'proxytype': self.proxyType
|
||||
})
|
||||
|
||||
data.update({
|
||||
'type': '4',
|
||||
'token_params': json.dumps(jPayload)
|
||||
})
|
||||
else:
|
||||
jPayload = {
|
||||
'sitekey': siteKey,
|
||||
'pageurl': url
|
||||
}
|
||||
|
||||
if self.proxy:
|
||||
jPayload.update({
|
||||
'proxy': self.proxy,
|
||||
'proxytype': self.proxyType
|
||||
})
|
||||
|
||||
data.update({
|
||||
'type': '7',
|
||||
'hcaptcha_params': json.dumps(jPayload)
|
||||
})
|
||||
|
||||
response = polling2.poll(
|
||||
lambda: self.session.post(
|
||||
'{}/captcha'.format(self.host),
|
||||
headers={'Accept': 'application/json'},
|
||||
data=data,
|
||||
allow_redirects=False
|
||||
),
|
||||
check_success=_checkRequest,
|
||||
step=10,
|
||||
timeout=180
|
||||
)
|
||||
|
||||
if response:
|
||||
return response.json().get('captcha')
|
||||
else:
|
||||
raise CaptchaBadJobID(
|
||||
'DeathByCaptcha: Error no job id was returned.'
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def getCaptchaAnswer(self, captchaType, url, siteKey, captchaParams):
|
||||
jobID = None
|
||||
|
||||
for param in ['username', 'password']:
|
||||
if not captchaParams.get(param):
|
||||
raise CaptchaParameter(
|
||||
f"DeathByCaptcha: Missing '{param}' parameter."
|
||||
)
|
||||
setattr(self, param, captchaParams.get(param))
|
||||
|
||||
if captchaParams.get('proxy') and not captchaParams.get('no_proxy'):
|
||||
hostParsed = urlparse(captchaParams.get('proxy', {}).get('https'))
|
||||
|
||||
if not hostParsed.scheme:
|
||||
raise CaptchaParameter('Cannot parse proxy correctly, bad scheme')
|
||||
|
||||
if not hostParsed.netloc:
|
||||
raise CaptchaParameter('Cannot parse proxy correctly, bad netloc')
|
||||
|
||||
self.proxyType = hostParsed.scheme.upper()
|
||||
self.proxy = captchaParams.get('proxy', {}).get('https')
|
||||
else:
|
||||
self.proxy = None
|
||||
|
||||
try:
|
||||
jobID = self.requestSolve(captchaType, url, siteKey)
|
||||
return self.requestJob(jobID)
|
||||
except polling2.TimeoutException:
|
||||
try:
|
||||
if jobID:
|
||||
self.reportJob(jobID)
|
||||
except polling2.TimeoutException:
|
||||
raise CaptchaTimeout(
|
||||
f"DeathByCaptcha: Captcha solve took to long and also failed reporting the job id {jobID}."
|
||||
)
|
||||
|
||||
raise CaptchaTimeout(
|
||||
f"DeathByCaptcha: Captcha solve took to long to execute job id {jobID}, aborting."
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
captchaSolver()
|
||||
111
lib/cloudscraper/exceptions.py
Normal file
111
lib/cloudscraper/exceptions.py
Normal file
@@ -0,0 +1,111 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
"""
|
||||
cloudscraper.exceptions
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
This module contains the set of cloudscraper exceptions.
|
||||
"""
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class CloudflareException(Exception):
|
||||
"""
|
||||
Base exception class for cloudscraper for Cloudflare
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareLoopProtection(CloudflareException):
|
||||
"""
|
||||
Raise an exception for recursive depth protection
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareCode1020(CloudflareException):
|
||||
"""
|
||||
Raise an exception for Cloudflare code 1020 block
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareIUAMError(CloudflareException):
|
||||
"""
|
||||
Raise an error for problem extracting IUAM paramters
|
||||
from Cloudflare payload
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareChallengeError(CloudflareException):
|
||||
"""
|
||||
Raise an error when detected new Cloudflare challenge
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareSolveError(CloudflareException):
|
||||
"""
|
||||
Raise an error when issue with solving Cloudflare challenge
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareCaptchaError(CloudflareException):
|
||||
"""
|
||||
Raise an error for problem extracting Captcha paramters
|
||||
from Cloudflare payload
|
||||
"""
|
||||
|
||||
|
||||
class CloudflareCaptchaProvider(CloudflareException):
|
||||
"""
|
||||
Raise an exception for no Captcha provider loaded for Cloudflare.
|
||||
"""
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class CaptchaException(Exception):
|
||||
"""
|
||||
Base exception class for cloudscraper captcha Providers
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaServiceUnavailable(CaptchaException):
|
||||
"""
|
||||
Raise an exception for external services that cannot be reached
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaAPIError(CaptchaException):
|
||||
"""
|
||||
Raise an error for error from API response.
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaAccountError(CaptchaException):
|
||||
"""
|
||||
Raise an error for captcha provider account problem.
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaTimeout(CaptchaException):
|
||||
"""
|
||||
Raise an exception for captcha provider taking too long.
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaParameter(CaptchaException):
|
||||
"""
|
||||
Raise an exception for bad or missing Parameter.
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaBadJobID(CaptchaException):
|
||||
"""
|
||||
Raise an exception for invalid job id.
|
||||
"""
|
||||
|
||||
|
||||
class CaptchaReportError(CaptchaException):
|
||||
"""
|
||||
Raise an error for captcha provider unable to report bad solve.
|
||||
"""
|
||||
72
lib/cloudscraper/help.py
Normal file
72
lib/cloudscraper/help.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import json
|
||||
import platform
|
||||
import requests
|
||||
import ssl
|
||||
import sys
|
||||
import urllib3
|
||||
|
||||
from collections import OrderedDict
|
||||
from . import __version__ as cloudscraper_version
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
def getPossibleCiphers():
|
||||
try:
|
||||
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
|
||||
context.set_ciphers('ALL')
|
||||
return sorted([cipher['name'] for cipher in context.get_ciphers()])
|
||||
except AttributeError:
|
||||
return 'get_ciphers() is unsupported'
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
def _pythonVersion():
|
||||
interpreter = platform.python_implementation()
|
||||
interpreter_version = platform.python_version()
|
||||
|
||||
if interpreter == 'PyPy':
|
||||
interpreter_version = \
|
||||
'{}.{}.{}'.format(sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro)
|
||||
if sys.pypy_version_info.releaselevel != 'final':
|
||||
interpreter_version = '{}{}'.format(interpreter_version, sys.pypy_version_info.releaselevel)
|
||||
return {
|
||||
'name': interpreter,
|
||||
'version': interpreter_version
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
def systemInfo():
|
||||
try:
|
||||
platform_info = {
|
||||
'system': platform.system(),
|
||||
'release': platform.release(),
|
||||
}
|
||||
except IOError:
|
||||
platform_info = {
|
||||
'system': 'Unknown',
|
||||
'release': 'Unknown',
|
||||
}
|
||||
|
||||
return OrderedDict([
|
||||
('platform', platform_info),
|
||||
('interpreter', _pythonVersion()),
|
||||
('cloudscraper', cloudscraper_version),
|
||||
('requests', requests.__version__),
|
||||
('urllib3', urllib3.__version__),
|
||||
('OpenSSL', OrderedDict(
|
||||
[
|
||||
('version', ssl.OPENSSL_VERSION),
|
||||
('ciphers', getPossibleCiphers())
|
||||
]
|
||||
))
|
||||
])
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
print(json.dumps(systemInfo(), indent=4))
|
||||
56
lib/cloudscraper/interpreters/__init__.py
Normal file
56
lib/cloudscraper/interpreters/__init__.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import sys
|
||||
import logging
|
||||
import abc
|
||||
|
||||
from ..exceptions import CloudflareSolveError
|
||||
|
||||
if sys.version_info >= (3, 4):
|
||||
ABC = abc.ABC # noqa
|
||||
else:
|
||||
ABC = abc.ABCMeta('ABC', (), {})
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
interpreters = {}
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class JavaScriptInterpreter(ABC):
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@abc.abstractmethod
|
||||
def __init__(self, name):
|
||||
interpreters[name] = self
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@classmethod
|
||||
def dynamicImport(cls, name):
|
||||
if name not in interpreters:
|
||||
try:
|
||||
__import__('{}.{}'.format(cls.__module__, name))
|
||||
if not isinstance(interpreters.get(name), JavaScriptInterpreter):
|
||||
raise ImportError('The interpreter was not initialized.')
|
||||
except ImportError:
|
||||
logging.error('Unable to load {} interpreter'.format(name))
|
||||
raise
|
||||
|
||||
return interpreters[name]
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@abc.abstractmethod
|
||||
def eval(self, jsEnv, js):
|
||||
pass
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def solveChallenge(self, body, domain):
|
||||
try:
|
||||
return '{0:.10f}'.format(float(self.eval(body, domain)))
|
||||
except Exception:
|
||||
raise CloudflareSolveError(
|
||||
'Error trying to solve Cloudflare IUAM Javascript, they may have changed their technique.'
|
||||
)
|
||||
103
lib/cloudscraper/interpreters/chakracore.py
Normal file
103
lib/cloudscraper/interpreters/chakracore.py
Normal file
@@ -0,0 +1,103 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import os
|
||||
import sys
|
||||
import ctypes.util
|
||||
|
||||
from ctypes import c_void_p, c_size_t, byref, create_string_buffer, CDLL
|
||||
|
||||
from . import JavaScriptInterpreter
|
||||
from .encapsulated import template
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class ChallengeInterpreter(JavaScriptInterpreter):
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def __init__(self):
|
||||
super(ChallengeInterpreter, self).__init__('chakracore')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def eval(self, body, domain):
|
||||
chakraCoreLibrary = None
|
||||
|
||||
# check current working directory.
|
||||
for _libraryFile in ['libChakraCore.so', 'libChakraCore.dylib', 'ChakraCore.dll']:
|
||||
if os.path.isfile(os.path.join(os.getcwd(), _libraryFile)):
|
||||
chakraCoreLibrary = os.path.join(os.getcwd(), _libraryFile)
|
||||
continue
|
||||
|
||||
if not chakraCoreLibrary:
|
||||
chakraCoreLibrary = ctypes.util.find_library('ChakraCore')
|
||||
|
||||
if not chakraCoreLibrary:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError(
|
||||
'ChakraCore library not found in current path or any of your system library paths, '
|
||||
'please download from https://www.github.com/VeNoMouS/cloudscraper/tree/ChakraCore/, '
|
||||
'or https://github.com/Microsoft/ChakraCore/'
|
||||
)
|
||||
|
||||
try:
|
||||
chakraCore = CDLL(chakraCoreLibrary)
|
||||
except OSError:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError('There was an error loading the ChakraCore library {}'.format(chakraCoreLibrary))
|
||||
|
||||
if sys.platform != 'win32':
|
||||
chakraCore.DllMain(0, 1, 0)
|
||||
chakraCore.DllMain(0, 2, 0)
|
||||
|
||||
script = create_string_buffer(template(body, domain).encode('utf-16'))
|
||||
|
||||
runtime = c_void_p()
|
||||
chakraCore.JsCreateRuntime(0, 0, byref(runtime))
|
||||
|
||||
context = c_void_p()
|
||||
chakraCore.JsCreateContext(runtime, byref(context))
|
||||
chakraCore.JsSetCurrentContext(context)
|
||||
|
||||
fname = c_void_p()
|
||||
chakraCore.JsCreateString(
|
||||
'iuam-challenge.js',
|
||||
len('iuam-challenge.js'),
|
||||
byref(fname)
|
||||
)
|
||||
|
||||
scriptSource = c_void_p()
|
||||
chakraCore.JsCreateExternalArrayBuffer(
|
||||
script,
|
||||
len(script),
|
||||
0,
|
||||
0,
|
||||
byref(scriptSource)
|
||||
)
|
||||
|
||||
jsResult = c_void_p()
|
||||
chakraCore.JsRun(scriptSource, 0, fname, 0x02, byref(jsResult))
|
||||
|
||||
resultJSString = c_void_p()
|
||||
chakraCore.JsConvertValueToString(jsResult, byref(resultJSString))
|
||||
|
||||
stringLength = c_size_t()
|
||||
chakraCore.JsCopyString(resultJSString, 0, 0, byref(stringLength))
|
||||
|
||||
resultSTR = create_string_buffer(stringLength.value + 1)
|
||||
chakraCore.JsCopyString(
|
||||
resultJSString,
|
||||
byref(resultSTR),
|
||||
stringLength.value + 1,
|
||||
0
|
||||
)
|
||||
|
||||
chakraCore.JsDisposeRuntime(runtime)
|
||||
|
||||
return resultSTR.value
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
ChallengeInterpreter()
|
||||
62
lib/cloudscraper/interpreters/encapsulated.py
Normal file
62
lib/cloudscraper/interpreters/encapsulated.py
Normal file
@@ -0,0 +1,62 @@
|
||||
import logging
|
||||
import re
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
def template(body, domain):
|
||||
BUG_REPORT = 'Cloudflare may have changed their technique, or there may be a bug in the script.'
|
||||
|
||||
try:
|
||||
js = re.search(
|
||||
r'setTimeout\(function\(\){\s+(.*?a\.value\s*=\s*\S+toFixed\(10\);)',
|
||||
body,
|
||||
re.M | re.S
|
||||
).group(1)
|
||||
except Exception:
|
||||
raise ValueError('Unable to identify Cloudflare IUAM Javascript on website. {}'.format(BUG_REPORT))
|
||||
|
||||
jsEnv = '''String.prototype.italics=function(str) {{return "<i>" + this + "</i>";}};
|
||||
var subVars= {{{subVars}}};
|
||||
var document = {{
|
||||
createElement: function () {{
|
||||
return {{ firstChild: {{ href: "https://{domain}/" }} }}
|
||||
}},
|
||||
getElementById: function (str) {{
|
||||
return {{"innerHTML": subVars[str]}};
|
||||
}}
|
||||
}};
|
||||
'''
|
||||
|
||||
try:
|
||||
js = js.replace(
|
||||
r"(setInterval(function(){}, 100),t.match(/https?:\/\//)[0]);",
|
||||
r"t.match(/https?:\/\//)[0];"
|
||||
)
|
||||
|
||||
k = re.search(r" k\s*=\s*'(?P<k>\S+)';", body).group('k')
|
||||
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(k))
|
||||
|
||||
subVars = ''
|
||||
for m in r.finditer(body):
|
||||
subVars = '{}\n\t\t{}{}: {},\n'.format(subVars, k, m.group('id'), m.group('jsfuck'))
|
||||
subVars = subVars[:-2]
|
||||
|
||||
except: # noqa
|
||||
logging.error('Error extracting Cloudflare IUAM Javascript. {}'.format(BUG_REPORT))
|
||||
raise
|
||||
|
||||
return '{}{}'.format(
|
||||
re.sub(
|
||||
r'\s{2,}',
|
||||
' ',
|
||||
jsEnv.format(
|
||||
domain=domain,
|
||||
subVars=subVars
|
||||
),
|
||||
re.MULTILINE | re.DOTALL
|
||||
),
|
||||
js
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
44
lib/cloudscraper/interpreters/js2py.py
Normal file
44
lib/cloudscraper/interpreters/js2py.py
Normal file
@@ -0,0 +1,44 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import js2py
|
||||
import logging
|
||||
import base64
|
||||
|
||||
from . import JavaScriptInterpreter
|
||||
|
||||
from .encapsulated import template
|
||||
from .jsunfuck import jsunfuck
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class ChallengeInterpreter(JavaScriptInterpreter):
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def __init__(self):
|
||||
super(ChallengeInterpreter, self).__init__('js2py')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def eval(self, body, domain):
|
||||
|
||||
jsPayload = template(body, domain)
|
||||
|
||||
if js2py.eval_js('(+(+!+[]+[+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[!+[]+!+[]]+[+[]])+[])[+!+[]]') == '1':
|
||||
logging.warning('WARNING - Please upgrade your js2py https://github.com/PiotrDabkowski/Js2Py, applying work around for the meantime.')
|
||||
jsPayload = jsunfuck(jsPayload)
|
||||
|
||||
def atob(s):
|
||||
return base64.b64decode('{}'.format(s)).decode('utf-8')
|
||||
|
||||
js2py.disable_pyimport()
|
||||
context = js2py.EvalJs({'atob': atob})
|
||||
result = context.eval(jsPayload)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
ChallengeInterpreter()
|
||||
97
lib/cloudscraper/interpreters/jsunfuck.py
Normal file
97
lib/cloudscraper/interpreters/jsunfuck.py
Normal file
@@ -0,0 +1,97 @@
|
||||
MAPPING = {
|
||||
'a': '(false+"")[1]',
|
||||
'b': '([]["entries"]()+"")[2]',
|
||||
'c': '([]["fill"]+"")[3]',
|
||||
'd': '(undefined+"")[2]',
|
||||
'e': '(true+"")[3]',
|
||||
'f': '(false+"")[0]',
|
||||
'g': '(false+[0]+String)[20]',
|
||||
'h': '(+(101))["to"+String["name"]](21)[1]',
|
||||
'i': '([false]+undefined)[10]',
|
||||
'j': '([]["entries"]()+"")[3]',
|
||||
'k': '(+(20))["to"+String["name"]](21)',
|
||||
'l': '(false+"")[2]',
|
||||
'm': '(Number+"")[11]',
|
||||
'n': '(undefined+"")[1]',
|
||||
'o': '(true+[]["fill"])[10]',
|
||||
'p': '(+(211))["to"+String["name"]](31)[1]',
|
||||
'q': '(+(212))["to"+String["name"]](31)[1]',
|
||||
'r': '(true+"")[1]',
|
||||
's': '(false+"")[3]',
|
||||
't': '(true+"")[0]',
|
||||
'u': '(undefined+"")[0]',
|
||||
'v': '(+(31))["to"+String["name"]](32)',
|
||||
'w': '(+(32))["to"+String["name"]](33)',
|
||||
'x': '(+(101))["to"+String["name"]](34)[1]',
|
||||
'y': '(NaN+[Infinity])[10]',
|
||||
'z': '(+(35))["to"+String["name"]](36)',
|
||||
'A': '(+[]+Array)[10]',
|
||||
'B': '(+[]+Boolean)[10]',
|
||||
'C': 'Function("return escape")()(("")["italics"]())[2]',
|
||||
'D': 'Function("return escape")()([]["fill"])["slice"]("-1")',
|
||||
'E': '(RegExp+"")[12]',
|
||||
'F': '(+[]+Function)[10]',
|
||||
'G': '(false+Function("return Date")()())[30]',
|
||||
'I': '(Infinity+"")[0]',
|
||||
'M': '(true+Function("return Date")()())[30]',
|
||||
'N': '(NaN+"")[0]',
|
||||
'O': '(NaN+Function("return{}")())[11]',
|
||||
'R': '(+[]+RegExp)[10]',
|
||||
'S': '(+[]+String)[10]',
|
||||
'T': '(NaN+Function("return Date")()())[30]',
|
||||
'U': '(NaN+Function("return{}")()["to"+String["name"]]["call"]())[11]',
|
||||
' ': '(NaN+[]["fill"])[11]',
|
||||
'"': '("")["fontcolor"]()[12]',
|
||||
'%': 'Function("return escape")()([]["fill"])[21]',
|
||||
'&': '("")["link"](0+")[10]',
|
||||
'(': '(undefined+[]["fill"])[22]',
|
||||
')': '([0]+false+[]["fill"])[20]',
|
||||
'+': '(+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]])+[])[2]',
|
||||
',': '([]["slice"]["call"](false+"")+"")[1]',
|
||||
'-': '(+(.+[0000000001])+"")[2]',
|
||||
'.': '(+(+!+[]+[+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[!+[]+!+[]]+[+[]])+[])[+!+[]]',
|
||||
'/': '(false+[0])["italics"]()[10]',
|
||||
':': '(RegExp()+"")[3]',
|
||||
';': '("")["link"](")[14]',
|
||||
'<': '("")["italics"]()[0]',
|
||||
'=': '("")["fontcolor"]()[11]',
|
||||
'>': '("")["italics"]()[2]',
|
||||
'?': '(RegExp()+"")[2]',
|
||||
'[': '([]["entries"]()+"")[0]',
|
||||
']': '([]["entries"]()+"")[22]',
|
||||
'{': '(true+[]["fill"])[20]',
|
||||
'}': '([]["fill"]+"")["slice"]("-1")'
|
||||
}
|
||||
|
||||
SIMPLE = {
|
||||
'false': '![]',
|
||||
'true': '!![]',
|
||||
'undefined': '[][[]]',
|
||||
'NaN': '+[![]]',
|
||||
'Infinity': '+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])' # +"1e1000"
|
||||
}
|
||||
|
||||
CONSTRUCTORS = {
|
||||
'Array': '[]',
|
||||
'Number': '(+[])',
|
||||
'String': '([]+[])',
|
||||
'Boolean': '(![])',
|
||||
'Function': '[]["fill"]',
|
||||
'RegExp': 'Function("return/"+false+"/")()'
|
||||
}
|
||||
|
||||
|
||||
def jsunfuck(jsfuckString):
|
||||
for key in sorted(MAPPING, key=lambda k: len(MAPPING[k]), reverse=True):
|
||||
if MAPPING.get(key) in jsfuckString:
|
||||
jsfuckString = jsfuckString.replace(MAPPING.get(key), '"{}"'.format(key))
|
||||
|
||||
for key in sorted(SIMPLE, key=lambda k: len(SIMPLE[k]), reverse=True):
|
||||
if SIMPLE.get(key) in jsfuckString:
|
||||
jsfuckString = jsfuckString.replace(SIMPLE.get(key), '{}'.format(key))
|
||||
|
||||
# for key in sorted(CONSTRUCTORS, key=lambda k: len(CONSTRUCTORS[k]), reverse=True):
|
||||
# if CONSTRUCTORS.get(key) in jsfuckString:
|
||||
# jsfuckString = jsfuckString.replace(CONSTRUCTORS.get(key), '{}'.format(key))
|
||||
|
||||
return jsfuckString
|
||||
233
lib/cloudscraper/interpreters/native.py
Normal file
233
lib/cloudscraper/interpreters/native.py
Normal file
@@ -0,0 +1,233 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import ast
|
||||
import re
|
||||
import operator as op
|
||||
import pyparsing
|
||||
|
||||
from ..exceptions import CloudflareSolveError
|
||||
from . import JavaScriptInterpreter
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
_OP_MAP = {
|
||||
ast.Add: op.add,
|
||||
ast.Sub: op.sub,
|
||||
ast.Mult: op.mul,
|
||||
ast.Div: op.truediv,
|
||||
ast.Invert: op.neg,
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class Calc(ast.NodeVisitor):
|
||||
|
||||
def visit_BinOp(self, node):
|
||||
return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def visit_Num(self, node):
|
||||
return node.n
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def visit_Expr(self, node):
|
||||
return self.visit(node.value)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
@classmethod
|
||||
def doMath(cls, expression):
|
||||
tree = ast.parse(expression)
|
||||
calc = cls()
|
||||
return calc.visit(tree.body[0])
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class Parentheses(object):
|
||||
|
||||
def fix(self, s):
|
||||
res = []
|
||||
self.visited = set([s])
|
||||
self.dfs(s, self.invalid(s), res)
|
||||
return res
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def dfs(self, s, n, res):
|
||||
if n == 0:
|
||||
res.append(s)
|
||||
return
|
||||
for i in range(len(s)):
|
||||
if s[i] in ['(', ')']:
|
||||
s_new = s[:i] + s[i + 1:]
|
||||
if s_new not in self.visited and self.invalid(s_new) < n:
|
||||
self.visited.add(s_new)
|
||||
self.dfs(s_new, self.invalid(s_new), res)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def invalid(self, s):
|
||||
plus = minus = 0
|
||||
memo = {"(": 1, ")": -1}
|
||||
for c in s:
|
||||
plus += memo.get(c, 0)
|
||||
minus += 1 if plus < 0 else 0
|
||||
plus = max(0, plus)
|
||||
return plus + minus
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class ChallengeInterpreter(JavaScriptInterpreter):
|
||||
|
||||
def __init__(self):
|
||||
super(ChallengeInterpreter, self).__init__('native')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def eval(self, body, domain):
|
||||
|
||||
operators = {
|
||||
'+': op.add,
|
||||
'-': op.sub,
|
||||
'*': op.mul,
|
||||
'/': op.truediv
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def flatten(lists):
|
||||
return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def jsfuckToNumber(jsFuck):
|
||||
# "Clean Up" JSFuck
|
||||
jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
|
||||
jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
|
||||
jsFuck = Parentheses().fix(jsFuck)[0]
|
||||
|
||||
# Hackery Parser for Math
|
||||
stack = []
|
||||
bstack = []
|
||||
|
||||
for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
|
||||
if i == '+':
|
||||
stack.append(bstack)
|
||||
bstack = []
|
||||
continue
|
||||
bstack.append(i)
|
||||
stack.append(bstack)
|
||||
|
||||
return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def divisorMath(payload, needle, domain):
|
||||
jsfuckMath = payload.split('/')
|
||||
if needle in jsfuckMath[1]:
|
||||
expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
|
||||
|
||||
expression_value = operators[expression[1]](
|
||||
float(jsfuckToNumber(expression[0])),
|
||||
float(ord(domain[jsfuckToNumber(jsfuckMath[1][
|
||||
jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
|
||||
])]))
|
||||
)
|
||||
else:
|
||||
expression_value = jsfuckToNumber(jsfuckMath[1])
|
||||
|
||||
expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
|
||||
|
||||
return expression_value
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def challengeSolve(body, domain):
|
||||
jschl_answer = 0
|
||||
|
||||
try:
|
||||
jsfuckChallenge = re.search(
|
||||
r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
|
||||
r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
|
||||
body,
|
||||
re.DOTALL | re.MULTILINE
|
||||
).groupdict()
|
||||
except AttributeError:
|
||||
raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
|
||||
|
||||
kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
|
||||
if kJSFUCK:
|
||||
try:
|
||||
kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
|
||||
except IndexError:
|
||||
raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
|
||||
|
||||
try:
|
||||
kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
|
||||
except IndexError:
|
||||
raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
|
||||
|
||||
try:
|
||||
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
|
||||
|
||||
kValues = {}
|
||||
for m in r.finditer(body):
|
||||
kValues[int(m.group('id'))] = m.group('jsfuck')
|
||||
|
||||
jsfuckChallenge['k'] = kValues[kJSFUCK]
|
||||
except (AttributeError, IndexError):
|
||||
raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
|
||||
|
||||
jsfuckChallenge['challenge'] = re.finditer(
|
||||
r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
|
||||
jsfuckChallenge['variable'],
|
||||
jsfuckChallenge['variable']
|
||||
),
|
||||
jsfuckChallenge['challenge']
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
if '/' in jsfuckChallenge['init']:
|
||||
val = jsfuckChallenge['init'].split('/')
|
||||
jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
|
||||
else:
|
||||
jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
for expressionMatch in jsfuckChallenge['challenge']:
|
||||
oper, expression = expressionMatch.groups()
|
||||
|
||||
if '/' in expression:
|
||||
expression_value = divisorMath(expression, 'function(p)', domain)
|
||||
else:
|
||||
if 'Element' in expression:
|
||||
expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
|
||||
else:
|
||||
expression_value = jsfuckToNumber(expression)
|
||||
|
||||
jschl_answer = operators[oper](jschl_answer, expression_value)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
# if not jsfuckChallenge['k'] and '+ t.length' in body:
|
||||
# jschl_answer += len(domain)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
return '{0:.10f}'.format(jschl_answer)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
return challengeSolve(body, domain)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
ChallengeInterpreter()
|
||||
49
lib/cloudscraper/interpreters/nodejs.py
Normal file
49
lib/cloudscraper/interpreters/nodejs.py
Normal file
@@ -0,0 +1,49 @@
|
||||
import base64
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from . import JavaScriptInterpreter
|
||||
from .encapsulated import template
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class ChallengeInterpreter(JavaScriptInterpreter):
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def __init__(self):
|
||||
super(ChallengeInterpreter, self).__init__('nodejs')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def eval(self, body, domain):
|
||||
try:
|
||||
js = 'var atob = function(str) {return Buffer.from(str, "base64").toString("binary");};' \
|
||||
'var challenge = atob("%s");' \
|
||||
'var context = {atob: atob};' \
|
||||
'var options = {filename: "iuam-challenge.js", timeout: 4000};' \
|
||||
'var answer = require("vm").runInNewContext(challenge, context, options);' \
|
||||
'process.stdout.write(String(answer));' \
|
||||
% base64.b64encode(template(body, domain).encode('UTF-8')).decode('ascii')
|
||||
|
||||
return subprocess.check_output(['node', '-e', js])
|
||||
|
||||
except OSError as e:
|
||||
if e.errno == 2:
|
||||
raise EnvironmentError(
|
||||
'Missing Node.js runtime. Node is required and must be in the PATH (check with `node -v`).\n\n'
|
||||
'Your Node binary may be called `nodejs` rather than `node`, '
|
||||
'in which case you may need to run `apt-get install nodejs-legacy` on some Debian-based systems.\n\n'
|
||||
'(Please read the cloudscraper README\'s Dependencies section: '
|
||||
'https://github.com/VeNoMouS/cloudscraper#dependencies.)'
|
||||
)
|
||||
raise
|
||||
except Exception:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError('Error executing Cloudflare IUAM Javascript in nodejs')
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
ChallengeInterpreter()
|
||||
33
lib/cloudscraper/interpreters/v8.py
Normal file
33
lib/cloudscraper/interpreters/v8.py
Normal file
@@ -0,0 +1,33 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
try:
|
||||
import v8eval
|
||||
except ImportError:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError('Please install the python module v8eval either via pip or download it from https://github.com/sony/v8eval')
|
||||
|
||||
from . import JavaScriptInterpreter
|
||||
from .encapsulated import template
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class ChallengeInterpreter(JavaScriptInterpreter):
|
||||
|
||||
def __init__(self):
|
||||
super(ChallengeInterpreter, self).__init__('v8')
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def eval(self, body, domain):
|
||||
try:
|
||||
return v8eval.V8().eval(template(body, domain))
|
||||
except (TypeError, v8eval.V8Error):
|
||||
RuntimeError('We encountered an error running the V8 Engine.')
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
ChallengeInterpreter()
|
||||
124
lib/cloudscraper/user_agent/__init__.py
Normal file
124
lib/cloudscraper/user_agent/__init__.py
Normal file
@@ -0,0 +1,124 @@
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import sys
|
||||
import ssl
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
|
||||
class User_Agent():
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.headers = None
|
||||
self.cipherSuite = []
|
||||
self.loadUserAgent(*args, **kwargs)
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def filterAgents(self, user_agents):
|
||||
filtered = {}
|
||||
|
||||
if self.mobile:
|
||||
if self.platform in user_agents['mobile'] and user_agents['mobile'][self.platform]:
|
||||
filtered.update(user_agents['mobile'][self.platform])
|
||||
|
||||
if self.desktop:
|
||||
if self.platform in user_agents['desktop'] and user_agents['desktop'][self.platform]:
|
||||
filtered.update(user_agents['desktop'][self.platform])
|
||||
|
||||
return filtered
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def tryMatchCustom(self, user_agents):
|
||||
for device_type in user_agents['user_agents']:
|
||||
for platform in user_agents['user_agents'][device_type]:
|
||||
for browser in user_agents['user_agents'][device_type][platform]:
|
||||
if re.search(re.escape(self.custom), ' '.join(user_agents['user_agents'][device_type][platform][browser])):
|
||||
self.headers = user_agents['headers'][browser]
|
||||
self.headers['User-Agent'] = self.custom
|
||||
self.cipherSuite = user_agents['cipherSuite'][browser]
|
||||
return True
|
||||
return False
|
||||
|
||||
# ------------------------------------------------------------------------------- #
|
||||
|
||||
def loadUserAgent(self, *args, **kwargs):
|
||||
self.browser = kwargs.pop('browser', None)
|
||||
|
||||
self.platforms = ['linux', 'windows', 'darwin', 'android', 'ios']
|
||||
self.browsers = ['chrome', 'firefox']
|
||||
|
||||
if isinstance(self.browser, dict):
|
||||
self.custom = self.browser.get('custom', None)
|
||||
self.platform = self.browser.get('platform', None)
|
||||
self.desktop = self.browser.get('desktop', True)
|
||||
self.mobile = self.browser.get('mobile', True)
|
||||
self.browser = self.browser.get('browser', None)
|
||||
else:
|
||||
self.custom = kwargs.pop('custom', None)
|
||||
self.platform = kwargs.pop('platform', None)
|
||||
self.desktop = kwargs.pop('desktop', True)
|
||||
self.mobile = kwargs.pop('mobile', True)
|
||||
|
||||
if not self.desktop and not self.mobile:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError("Sorry you can't have mobile and desktop disabled at the same time.")
|
||||
|
||||
with open(os.path.join(os.path.dirname(__file__), 'browsers.json'), 'r') as fp:
|
||||
user_agents = json.load(
|
||||
fp,
|
||||
object_pairs_hook=OrderedDict
|
||||
)
|
||||
|
||||
if self.custom:
|
||||
if not self.tryMatchCustom(user_agents):
|
||||
self.cipherSuite = [
|
||||
ssl._DEFAULT_CIPHERS,
|
||||
'!AES128-SHA',
|
||||
'!ECDHE-RSA-AES256-SHA',
|
||||
]
|
||||
self.headers = OrderedDict([
|
||||
('User-Agent', self.custom),
|
||||
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'),
|
||||
('Accept-Language', 'en-US,en;q=0.9'),
|
||||
('Accept-Encoding', 'gzip, deflate, br')
|
||||
])
|
||||
else:
|
||||
if self.browser and self.browser not in self.browsers:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError('Sorry "{}" browser is not valid, valid browsers are [{}].'.format(self.browser), ", ".join(self.browsers))
|
||||
|
||||
if not self.platform:
|
||||
self.platform = random.SystemRandom().choice(self.platforms)
|
||||
|
||||
if self.platform not in self.platforms:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError('Sorry the platform "{}" is not valid, valid platforms are [{)}]'.format(self.platform, ", ".join(self.platforms)))
|
||||
|
||||
filteredAgents = self.filterAgents(user_agents['user_agents'])
|
||||
|
||||
if not self.browser:
|
||||
# has to be at least one in there...
|
||||
while not filteredAgents.get(self.browser):
|
||||
self.browser = random.SystemRandom().choice(list(filteredAgents.keys()))
|
||||
|
||||
if not filteredAgents[self.browser]:
|
||||
sys.tracebacklimit = 0
|
||||
raise RuntimeError('Sorry "{}" browser was not found with a platform of "{}".'.format(self.browser, self.platform))
|
||||
|
||||
self.cipherSuite = user_agents['cipherSuite'][self.browser]
|
||||
self.headers = user_agents['headers'][self.browser]
|
||||
|
||||
self.headers['User-Agent'] = random.SystemRandom().choice(filteredAgents[self.browser])
|
||||
|
||||
if not kwargs.get('allow_brotli', False) and 'br' in self.headers['Accept-Encoding']:
|
||||
self.headers['Accept-Encoding'] = ','.join([
|
||||
encoding for encoding in self.headers['Accept-Encoding'].split(',') if encoding.strip() != 'br'
|
||||
]).strip()
|
||||
7913
lib/cloudscraper/user_agent/browsers.json
Normal file
7913
lib/cloudscraper/user_agent/browsers.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -4,6 +4,7 @@ if sys.version_info[0] >= 3:
|
||||
from urllib.request import Request, urlopen
|
||||
else:
|
||||
from urllib2 import Request, urlopen
|
||||
str = unicode
|
||||
|
||||
|
||||
class Cursor(object):
|
||||
@@ -62,6 +63,7 @@ class Cursor(object):
|
||||
if len(self._file.cursors) == 0: self._file.cursor = False
|
||||
|
||||
def decode(self, data):
|
||||
if type(data) == str: data = data.encode()
|
||||
return self.decryptor.decrypt(data)
|
||||
|
||||
def prepare_decoder(self,offset):
|
||||
|
||||
@@ -36,7 +36,7 @@ class UnshortenIt(object):
|
||||
_anonymz_regex = r'anonymz\.com'
|
||||
_shrink_service_regex = r'shrink-service\.it'
|
||||
_rapidcrypt_regex = r'rapidcrypt\.net'
|
||||
_vcrypt_regex = r'vcrypt\.net|vcrypt\.pw'
|
||||
# _vcrypt_regex = r'vcrypt\.net|vcrypt\.pw'
|
||||
_linkup_regex = r'linkup\.pro|buckler.link'
|
||||
_linkhub_regex = r'linkhub\.icu'
|
||||
_swzz_regex = r'swzz\.xyz'
|
||||
@@ -48,7 +48,7 @@ class UnshortenIt(object):
|
||||
_simple_redirect = r'streamcrypt\.net/[^/]+'
|
||||
|
||||
listRegex = [_adfly_regex, _linkbucks_regex, _adfocus_regex, _lnxlu_regex, _shst_regex, _hrefli_regex, _anonymz_regex,
|
||||
_shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _vcrypt_regex, _linkup_regex, _linkhub_regex,
|
||||
_shrink_service_regex, _rapidcrypt_regex, _simple_iframe_regex, _linkup_regex, _linkhub_regex,
|
||||
_swzz_regex, _stayonline_regex, _snip_regex, _simple_redirect]
|
||||
|
||||
_maxretries = 5
|
||||
@@ -85,8 +85,8 @@ class UnshortenIt(object):
|
||||
uri, code = self._unshorten_rapidcrypt(uri)
|
||||
if re.search(self._simple_iframe_regex, uri, re.IGNORECASE):
|
||||
uri, code = self._unshorten_simple_iframe(uri)
|
||||
if re.search(self._vcrypt_regex, uri, re.IGNORECASE):
|
||||
uri, code = self._unshorten_vcrypt(uri)
|
||||
# if re.search(self._vcrypt_regex, uri, re.IGNORECASE):
|
||||
# uri, code = self._unshorten_vcrypt(uri)
|
||||
if re.search(self._linkup_regex, uri, re.IGNORECASE):
|
||||
uri, code = self._unshorten_linkup(uri)
|
||||
if re.search(self._linkhub_regex, uri, re.IGNORECASE):
|
||||
@@ -556,6 +556,8 @@ class UnshortenIt(object):
|
||||
# fix by greko inizio
|
||||
if not link:
|
||||
link = re.findall('action="(?:[^/]+.*?/[^/]+/([a-zA-Z0-9_]+))">', r.data)
|
||||
if not link:
|
||||
link = scrapertools.find_single_match(r.data, '\$\("a\.redirect"\)\.attr\("href",\s*"\s*(http[^"]+)')
|
||||
if link:
|
||||
uri = link
|
||||
short = re.findall('^https?://.*?(https?://.*)', uri)
|
||||
|
||||
5
platformcode/contextmenu/contextmenu.json
Normal file
5
platformcode/contextmenu/contextmenu.json
Normal file
@@ -0,0 +1,5 @@
|
||||
[
|
||||
"platformcode.contextmenu.search",
|
||||
"platformcode.contextmenu.update_tv_show",
|
||||
"platformcode.contextmenu.trailer"
|
||||
]
|
||||
127
platformcode/contextmenu/search.py
Normal file
127
platformcode/contextmenu/search.py
Normal file
@@ -0,0 +1,127 @@
|
||||
import xbmc, sys, os
|
||||
from platformcode import config, logger
|
||||
import re
|
||||
# incliuding folder libraries
|
||||
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
|
||||
sys.path.insert(0, librerias)
|
||||
|
||||
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
|
||||
addon_id = config.get_addon_core().getAddonInfo('id')
|
||||
global item_is_coming_from_kod
|
||||
|
||||
|
||||
def check_condition():
|
||||
global item_is_coming_from_kod
|
||||
logger.debug('check item condition')
|
||||
mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
|
||||
|
||||
folderPath = xbmc.getInfoLabel('Container.FolderPath')
|
||||
filePath = xbmc.getInfoLabel('ListItem.Path')
|
||||
fileNameAndPath = xbmc.getInfoLabel('ListItem.FileNameAndPath')
|
||||
|
||||
logger.debug('Container:',folderPath )
|
||||
logger.debug('listitem mediatype:',mediatype )
|
||||
logger.debug('filenamepath:', fileNameAndPath )
|
||||
logger.info('filepath:', filePath )
|
||||
|
||||
item_is_coming_from_kod = addon_id in filePath
|
||||
if not item_is_coming_from_kod:
|
||||
videolibpath = config.get_setting("videolibrarypath")
|
||||
if filePath.startswith(videolibpath):
|
||||
pattern = re.compile("\[.*\][\\\/]?$")
|
||||
item_is_coming_from_kod = pattern.search(filePath)
|
||||
|
||||
if item_is_coming_from_kod:
|
||||
logger.debug("item IS already managed by KOD")
|
||||
|
||||
return mediatype
|
||||
|
||||
|
||||
def get_menu_items():
|
||||
logger.debug('get menu item')
|
||||
if check_condition():
|
||||
return [(config.get_localized_string(90003 if item_is_coming_from_kod else 90005), execute)]
|
||||
else:
|
||||
return []
|
||||
|
||||
|
||||
def execute():
|
||||
"""
|
||||
Gather the selected ListItem's attributes in order to compute the `Item` parameters
|
||||
and perform the KOD's globalsearch.
|
||||
Globalsearch will be executed specifing the content-type of the selected ListItem
|
||||
|
||||
NOTE: this method needs the DBTYPE and TMDB_ID specified as ListItem's properties
|
||||
"""
|
||||
|
||||
# These following lines are commented and keep in the code just as reminder.
|
||||
# In future, they could be used to filter the search outcome
|
||||
|
||||
# ADDON: maybe can we know if the current windows is related to a specific addon?
|
||||
# we could skip the ContextMenu if we already are in KOD's window
|
||||
|
||||
tmdbid = xbmc.getInfoLabel('ListItem.Property(tmdb_id)')
|
||||
mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
|
||||
title = xbmc.getInfoLabel('ListItem.Title')
|
||||
year = xbmc.getInfoLabel('ListItem.Year')
|
||||
imdb = xbmc.getInfoLabel('ListItem.IMDBNumber')
|
||||
|
||||
if mediatype in ('episode', 'season'):
|
||||
mediatype = 'tvshow'
|
||||
title = xbmc.getInfoLabel('ListItem.TVShowTitle')
|
||||
|
||||
logstr = "Selected ListItem is: 'IMDB: {}' - TMDB: {}' - 'Title: {}' - 'Year: {}'' - 'Type: {}'".format(imdb, tmdbid, title, year, mediatype)
|
||||
logger.info(logstr)
|
||||
|
||||
if not tmdbid and imdb:
|
||||
logger.info('No TMDBid found. Try to get by IMDB')
|
||||
it = Item(contentType= mediatype, infoLabels={'imdb_id' : imdb})
|
||||
try:
|
||||
tmdb.set_infoLabels(it)
|
||||
tmdbid = it.infoLabels.get('tmdb_id', '')
|
||||
except:
|
||||
logger.info("Cannot find TMDB via imdb")
|
||||
|
||||
if not tmdbid:
|
||||
logger.info('No TMDBid found. Try to get by Title/Year')
|
||||
it = Item(contentTitle= title, contentType= mediatype, infoLabels={'year' : year})
|
||||
try:
|
||||
tmdb.set_infoLabels(it)
|
||||
tmdbid = it.infoLabels.get('tmdb_id', '')
|
||||
except:
|
||||
logger.info("Cannot find TMDB via title/year")
|
||||
|
||||
if not tmdbid:
|
||||
# We can continue searching by 'title (year)'
|
||||
logger.info( "No TMDB found, proceed with title/year:", title , "(" , year, ")" )
|
||||
|
||||
# User wants to search on other channels
|
||||
logger.info("Search on other channels")
|
||||
|
||||
item = Item(
|
||||
action="from_context",
|
||||
channel="search",
|
||||
contentType= mediatype,
|
||||
mode="search",
|
||||
contextual= True,
|
||||
text=title,
|
||||
type= mediatype,
|
||||
infoLabels= {
|
||||
'tmdb_id': tmdbid,
|
||||
'year': year,
|
||||
'mediatype': mediatype
|
||||
},
|
||||
folder= False
|
||||
)
|
||||
|
||||
logger.info("Invoking Item: ", item.tostring() )
|
||||
|
||||
itemurl = item.tourl()
|
||||
xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + itemurl + ")")
|
||||
|
||||
|
||||
|
||||
|
||||
23
platformcode/contextmenu/trailer.py
Normal file
23
platformcode/contextmenu/trailer.py
Normal file
@@ -0,0 +1,23 @@
|
||||
import xbmc
|
||||
|
||||
from core.item import Item
|
||||
from platformcode import config
|
||||
|
||||
|
||||
def get_menu_items():
|
||||
return [(config.get_localized_string(60359), execute)]
|
||||
|
||||
|
||||
def execute():
|
||||
tmdbid = xbmc.getInfoLabel('ListItem.Property(tmdb_id)')
|
||||
year = xbmc.getInfoLabel('ListItem.Year')
|
||||
mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
|
||||
title = xbmc.getInfoLabel('ListItem.Title')
|
||||
if mediatype in ('episode', 'season'):
|
||||
mediatype = 'tvshow'
|
||||
title = xbmc.getInfoLabel('ListItem.TVShowTitle')
|
||||
|
||||
item = Item(channel="trailertools", action="buscartrailer", search_title=title, contentType=mediatype,
|
||||
year=year, contentTitle=title, contextual=True)
|
||||
item.infoLabels['tmdb_id'] = tmdbid
|
||||
xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + item.tourl() + ")")
|
||||
203
platformcode/contextmenu/update_tv_show.py
Normal file
203
platformcode/contextmenu/update_tv_show.py
Normal file
@@ -0,0 +1,203 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import xbmc, sys, xbmcgui, os, xbmcvfs, traceback
|
||||
from platformcode import config, logger
|
||||
|
||||
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
|
||||
sys.path.insert(0, librerias)
|
||||
|
||||
from core.item import Item
|
||||
from lib.sambatools import libsmb as samba
|
||||
from core import scrapertools, support
|
||||
|
||||
path = ''
|
||||
mediatype = ''
|
||||
|
||||
|
||||
def exists(path, silent=False, vfs=True):
|
||||
path = xbmc.translatePath(path)
|
||||
try:
|
||||
if vfs:
|
||||
result = bool(xbmcvfs.exists(path))
|
||||
if not result and not path.endswith('/') and not path.endswith('\\'):
|
||||
result = bool(xbmcvfs.exists(join(path, ' ').rstrip()))
|
||||
return result
|
||||
elif path.lower().startswith("smb://"):
|
||||
return samba.exists(path)
|
||||
else:
|
||||
return os.path.exists(path)
|
||||
except:
|
||||
logger.error("ERROR when checking the path: %s" % path)
|
||||
if not silent:
|
||||
logger.error(traceback.format_exc())
|
||||
return False
|
||||
|
||||
|
||||
def join(*paths):
|
||||
list_path = []
|
||||
if paths[0].startswith("/"):
|
||||
list_path.append("")
|
||||
for path in paths:
|
||||
if path:
|
||||
list_path += path.replace("\\", "/").strip("/").split("/")
|
||||
|
||||
if scrapertools.find_single_match(paths[0], r'(^\w+:\/\/)'):
|
||||
return str("/".join(list_path))
|
||||
else:
|
||||
return str(os.sep.join(list_path))
|
||||
|
||||
|
||||
def search_paths(Id):
|
||||
records = execute_sql('SELECT idPath FROM tvshowlinkpath WHERE idShow LIKE "%s"' % Id)
|
||||
if len(records) >= 1:
|
||||
for record in records:
|
||||
path_records = execute_sql('SELECT strPath FROM path WHERE idPath LIKE "%s"' % record[0])
|
||||
for path in path_records:
|
||||
if config.get_setting('videolibrarypath') in path[0] and exists(join(path[0], 'tvshow.nfo')):
|
||||
return path[0]
|
||||
return ''
|
||||
|
||||
|
||||
def execute_sql(sql):
|
||||
logger.debug()
|
||||
file_db = ""
|
||||
records = None
|
||||
|
||||
# We look for the archive of the video database according to the version of kodi
|
||||
video_db = config.get_platform(True)['video_db']
|
||||
if video_db:
|
||||
file_db = os.path.join(xbmc.translatePath("special://userdata/Database"), video_db)
|
||||
|
||||
# alternative method to locate the database
|
||||
if not file_db or not os.path.exists(file_db):
|
||||
file_db = ""
|
||||
for f in os.path.listdir(xbmc.translatePath("special://userdata/Database")):
|
||||
path_f = os.path.join(xbmc.translatePath("special://userdata/Database"), f)
|
||||
|
||||
if os.path.pathoos.pathols.isfile(path_f) and f.lower().startswith('myvideos') and f.lower().endswith('.db'):
|
||||
file_db = path_f
|
||||
break
|
||||
|
||||
if file_db:
|
||||
logger.debug("DB file: %s" % file_db)
|
||||
conn = None
|
||||
try:
|
||||
import sqlite3
|
||||
conn = sqlite3.connect(file_db)
|
||||
cursor = conn.cursor()
|
||||
|
||||
logger.debug("Running sql: %s" % sql)
|
||||
cursor.execute(sql)
|
||||
conn.commit()
|
||||
|
||||
records = cursor.fetchall()
|
||||
if sql.lower().startswith("select"):
|
||||
if len(records) == 1 and records[0][0] is None:
|
||||
records = []
|
||||
|
||||
conn.close()
|
||||
logger.debug("Query executed. Records: %s" % len(records))
|
||||
|
||||
except:
|
||||
logger.error("Error executing sql query")
|
||||
if conn:
|
||||
conn.close()
|
||||
|
||||
else:
|
||||
logger.debug("Database not found")
|
||||
|
||||
return records
|
||||
|
||||
|
||||
def get_id():
|
||||
global mediatype
|
||||
|
||||
mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
|
||||
if mediatype == 'tvshow':
|
||||
dbid = xbmc.getInfoLabel('ListItem.DBID')
|
||||
elif mediatype in ('season', 'episode'):
|
||||
dbid = xbmc.getInfoLabel('ListItem.TvShowDBID')
|
||||
else:
|
||||
dbid = ''
|
||||
return dbid
|
||||
|
||||
def check_condition():
|
||||
# support.dbg()
|
||||
global path
|
||||
path = search_paths(get_id())
|
||||
return path
|
||||
|
||||
|
||||
def get_menu_items():
|
||||
logger.debug('get menu item')
|
||||
if check_condition():
|
||||
items = [(config.get_localized_string(70269), update)]
|
||||
from core import videolibrarytools
|
||||
nfo = path + 'tvshow.nfo'
|
||||
item = videolibrarytools.read_nfo(nfo)[1]
|
||||
if item:
|
||||
item.nfo = nfo
|
||||
item_url = item.tourl()
|
||||
# Context menu: Automatically search for new episodes or not
|
||||
if item.active and int(item.active) > 0:
|
||||
update_text = config.get_localized_string(60022)
|
||||
value = 0
|
||||
else:
|
||||
update_text = config.get_localized_string(60023)
|
||||
value = 1
|
||||
items.append((update_text,
|
||||
lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?{}&title={}&action=mark_tvshow_as_updatable&channel=videolibrary&active={})".format(item_url, update_text, str(value)))))
|
||||
if item.local_episodes_path == "":
|
||||
items.append((config.get_localized_string(80048), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?{}&action=add_local_episodes&channel=videolibrary&path={})".format(item_url, path))))
|
||||
|
||||
# if config.get_setting('downloadenabled'):
|
||||
# from core import videolibrarytools
|
||||
# from core import filetools
|
||||
# if xbmc.getInfoLabel('ListItem.FilenameAndPath'):
|
||||
# item = Item().fromurl(filetools.read(xbmc.getInfoLabel('ListItem.FilenameAndPath')))
|
||||
# else:
|
||||
# item = videolibrarytools.read_nfo(path + 'tvshow.nfo')[1]
|
||||
# if item:
|
||||
# item_url = item.tourl()
|
||||
#
|
||||
# Download movie
|
||||
# if mediatype == "movie":
|
||||
# items.append((config.get_localized_string(60354), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?%s&%s)" % (item_url,
|
||||
# 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))))
|
||||
#
|
||||
# elif item.contentSerieName:
|
||||
# Download series
|
||||
# if mediatype == "tvshow" and item.action not in ['findvideos']:
|
||||
# if item.channel == 'videolibrary':
|
||||
# items.append((config.get_localized_string(60003), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?%s&%s)" % (
|
||||
# item_url,
|
||||
# 'channel=downloads&action=save_download&unseen=true&from_channel=' + item.channel + '&from_action=' + item.action))))
|
||||
# items.append((config.get_localized_string(60355), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?%s&%s)" % (
|
||||
# item_url,
|
||||
# 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))))
|
||||
# items.append((config.get_localized_string(60357), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?%s&%s)" % (
|
||||
# item_url,
|
||||
# 'channel=downloads&action=save_download&download=season&from_channel=' + item.channel + '&from_action=' + item.action))))
|
||||
# Download episode
|
||||
# elif mediatype == "episode" and item.action in ['findvideos']:
|
||||
# items.append((config.get_localized_string(60356), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?%s&%s)" % (
|
||||
# item_url,
|
||||
# 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action))))
|
||||
# Download season
|
||||
# elif mediatype == "season":
|
||||
# items.append((config.get_localized_string(60357), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?%s&%s)" % (
|
||||
# item_url,
|
||||
# 'channel=downloads&action=save_download&download=season&from_channel=' + item.channel + '&from_action=' + item.action))))
|
||||
|
||||
return items
|
||||
else:
|
||||
return []
|
||||
|
||||
|
||||
def update():
|
||||
dbid = get_id()
|
||||
path = search_paths(dbid)
|
||||
if path:
|
||||
item = Item(action="update_tvshow", channel="videolibrary", path=path)
|
||||
# Why? I think it is not necessary, just commented
|
||||
# item.tourl()
|
||||
xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + item.tourl() + ")")
|
||||
@@ -69,6 +69,15 @@ def dialog_multiselect(heading, _list, autoclose=0, preselect=[], useDetails=Fal
|
||||
|
||||
|
||||
def dialog_progress(heading, message):
|
||||
if get_window() in ('WINDOW_HOME', 'WINDOW_SETTINGS_MENU', 'WINDOW_SETTINGS_INTERFACE', 'WINDOW_SKIN_SETTINGS', 'SKIN'):
|
||||
# in widget, hide any progress
|
||||
class Dummy(object):
|
||||
def __getattr__(self, name):
|
||||
def _missing(*args, **kwargs):
|
||||
pass
|
||||
return _missing
|
||||
return Dummy()
|
||||
else:
|
||||
dialog = xbmcgui.DialogProgress()
|
||||
dialog.create(heading, message)
|
||||
return dialog
|
||||
@@ -179,6 +188,7 @@ def dialog_register(heading, user=False, email=False, password=False, user_defau
|
||||
dialog = Register('Register.xml', config.get_runtime_path()).Start(heading, user, email, password, user_default, email_default, password_default, captcha_img)
|
||||
return dialog
|
||||
|
||||
|
||||
def dialog_info(item, scraper):
|
||||
class TitleOrIDWindow(xbmcgui.WindowXMLDialog):
|
||||
def Start(self, item, scraper):
|
||||
@@ -233,6 +243,7 @@ def dialog_info(item, scraper):
|
||||
dialog = TitleOrIDWindow('TitleOrIDWindow.xml', config.get_runtime_path()).Start(item, scraper)
|
||||
return dialog
|
||||
|
||||
|
||||
def dialog_select_group(heading, _list, preselect=0):
|
||||
class SelectGroup(xbmcgui.WindowXMLDialog):
|
||||
def start(self, heading, _list, preselect):
|
||||
@@ -688,7 +699,7 @@ def set_context_commands(item, item_url, parent_item, **kwargs):
|
||||
context_commands.append((config.get_localized_string(60354), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&from_channel=' + item.channel + '&from_action=' + item.action)))
|
||||
|
||||
elif item.contentSerieName:
|
||||
# Descargar series
|
||||
# Download series
|
||||
if item.contentType == "tvshow" and item.action not in ['findvideos']:
|
||||
if item.channel == 'videolibrary':
|
||||
context_commands.append((config.get_localized_string(60003), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'channel=downloads&action=save_download&unseen=true&from_channel=' + item.channel + '&from_action=' + item.action)))
|
||||
@@ -714,6 +725,265 @@ def is_playing():
|
||||
return xbmc_player.isPlaying()
|
||||
|
||||
|
||||
def get_window():
|
||||
"""
|
||||
Return if addon is used as widget
|
||||
For doing so, it check current window ID (https://kodi.wiki/view/Window_IDs)
|
||||
"""
|
||||
winId = xbmcgui.getCurrentWindowId()
|
||||
if winId == 9999:
|
||||
return 'WINDOW_INVALID'
|
||||
elif winId == 10000:
|
||||
return 'WINDOW_HOME'
|
||||
elif winId == 10001:
|
||||
return 'WINDOW_PROGRAMS'
|
||||
elif winId == 10002:
|
||||
return 'WINDOW_PICTURES'
|
||||
elif winId == 10003:
|
||||
return 'WINDOW_FILES'
|
||||
elif winId == 10004:
|
||||
return 'WINDOW_SETTINGS_MENU'
|
||||
elif winId == 10007:
|
||||
return 'WINDOW_SYSTEM_INFORMATION'
|
||||
elif winId == 10011:
|
||||
return 'WINDOW_SCREEN_CALIBRATION'
|
||||
|
||||
elif winId == 10016:
|
||||
return 'WINDOW_SETTINGS_START'
|
||||
elif winId == 10016:
|
||||
return 'WINDOW_SETTINGS_SYSTEM'
|
||||
elif winId == 10018:
|
||||
return 'WINDOW_SETTINGS_SERVICE'
|
||||
|
||||
elif winId == 10021:
|
||||
return 'WINDOW_SETTINGS_MYPVR'
|
||||
elif winId == 10022:
|
||||
return 'WINDOW_SETTINGS_MYGAMES'
|
||||
|
||||
elif winId == 10025:
|
||||
return 'WINDOW_VIDEO_NAV'
|
||||
elif winId == 10028:
|
||||
return 'WINDOW_VIDEO_PLAYLIST'
|
||||
|
||||
elif winId == 10029:
|
||||
return 'WINDOW_LOGIN_SCREEN'
|
||||
|
||||
elif winId == 10030:
|
||||
return 'WINDOW_SETTINGS_PLAYER'
|
||||
elif winId == 10031:
|
||||
return 'WINDOW_SETTINGS_MEDIA'
|
||||
elif winId == 10032:
|
||||
return 'WINDOW_SETTINGS_INTERFACE'
|
||||
|
||||
elif winId == 10034:
|
||||
return 'WINDOW_SETTINGS_PROFILES'
|
||||
elif winId == 10035:
|
||||
return 'WINDOW_SKIN_SETTINGS'
|
||||
|
||||
elif winId == 10040:
|
||||
return 'WINDOW_ADDON_BROWSER'
|
||||
|
||||
elif winId == 10050:
|
||||
return 'WINDOW_EVENT_LOG'
|
||||
|
||||
elif winId == 97:
|
||||
return 'WINDOW_SCREENSAVER_DIM'
|
||||
elif winId == 98:
|
||||
return 'WINDOW_DEBUG_INFO'
|
||||
elif winId == 10099:
|
||||
return 'WINDOW_DIALOG_POINTER'
|
||||
elif winId == 10100:
|
||||
return 'WINDOW_DIALOG_YES_NO'
|
||||
elif winId == 10101:
|
||||
return 'WINDOW_DIALOG_PROGRESS'
|
||||
elif winId == 10103:
|
||||
return 'WINDOW_DIALOG_KEYBOARD'
|
||||
elif winId == 10104:
|
||||
return 'WINDOW_DIALOG_VOLUME_BAR'
|
||||
elif winId == 10105:
|
||||
return 'WINDOW_DIALOG_SUB_MENU'
|
||||
elif winId == 10106:
|
||||
return 'WINDOW_DIALOG_CONTEXT_MENU'
|
||||
elif winId == 10107:
|
||||
return 'WINDOW_DIALOG_KAI_TOAST'
|
||||
elif winId == 10109:
|
||||
return 'WINDOW_DIALOG_NUMERIC'
|
||||
elif winId == 10110:
|
||||
return 'WINDOW_DIALOG_GAMEPAD'
|
||||
elif winId == 10111:
|
||||
return 'WINDOW_DIALOG_BUTTON_MENU'
|
||||
elif winId == 10114:
|
||||
return 'WINDOW_DIALOG_PLAYER_CONTROLS'
|
||||
elif winId == 10115:
|
||||
return 'WINDOW_DIALOG_SEEK_BAR'
|
||||
elif winId == 10116:
|
||||
return 'WINDOW_DIALOG_PLAYER_PROCESS_INFO'
|
||||
elif winId == 10120:
|
||||
return 'WINDOW_DIALOG_MUSIC_OSD'
|
||||
elif winId == 10121:
|
||||
return 'WINDOW_DIALOG_VIS_SETTINGS'
|
||||
elif winId == 10122:
|
||||
return 'WINDOW_DIALOG_VIS_PRESET_LIST'
|
||||
elif winId == 10123:
|
||||
return 'WINDOW_DIALOG_VIDEO_OSD_SETTINGS'
|
||||
elif winId == 10124:
|
||||
return 'WINDOW_DIALOG_AUDIO_OSD_SETTINGS'
|
||||
elif winId == 10125:
|
||||
return 'WINDOW_DIALOG_VIDEO_BOOKMARKS'
|
||||
elif winId == 10126:
|
||||
return 'WINDOW_DIALOG_FILE_BROWSER'
|
||||
elif winId == 10128:
|
||||
return 'WINDOW_DIALOG_NETWORK_SETUP'
|
||||
elif winId == 10129:
|
||||
return 'WINDOW_DIALOG_MEDIA_SOURCE'
|
||||
elif winId == 10130:
|
||||
return 'WINDOW_DIALOG_PROFILE_SETTINGS'
|
||||
elif winId == 10131:
|
||||
return 'WINDOW_DIALOG_LOCK_SETTINGS'
|
||||
elif winId == 10132:
|
||||
return 'WINDOW_DIALOG_CONTENT_SETTINGS'
|
||||
elif winId == 10133:
|
||||
return 'WINDOW_DIALOG_LIBEXPORT_SETTINGS'
|
||||
elif winId == 10134:
|
||||
return 'WINDOW_DIALOG_FAVOURITES'
|
||||
elif winId == 10135:
|
||||
return 'WINDOW_DIALOG_SONG_INFO'
|
||||
elif winId == 10136:
|
||||
return 'WINDOW_DIALOG_SMART_PLAYLIST_EDITOR'
|
||||
elif winId == 10137:
|
||||
return 'WINDOW_DIALOG_SMART_PLAYLIST_RULE'
|
||||
elif winId == 10138:
|
||||
return 'WINDOW_DIALOG_BUSY'
|
||||
elif winId == 10139:
|
||||
return 'WINDOW_DIALOG_PICTURE_INFO'
|
||||
elif winId == 10140:
|
||||
return 'WINDOW_DIALOG_ADDON_SETTINGS'
|
||||
elif winId == 10142:
|
||||
return 'WINDOW_DIALOG_FULLSCREEN_INFO'
|
||||
elif winId == 10145:
|
||||
return 'WINDOW_DIALOG_SLIDER'
|
||||
elif winId == 10146:
|
||||
return 'WINDOW_DIALOG_ADDON_INFO'
|
||||
elif winId == 10147:
|
||||
return 'WINDOW_DIALOG_TEXT_VIEWER'
|
||||
elif winId == 10148:
|
||||
return 'WINDOW_DIALOG_PLAY_EJECT'
|
||||
elif winId == 10149:
|
||||
return 'WINDOW_DIALOG_PERIPHERALS'
|
||||
elif winId == 10150:
|
||||
return 'WINDOW_DIALOG_PERIPHERAL_SETTINGS'
|
||||
elif winId == 10151:
|
||||
return 'WINDOW_DIALOG_EXT_PROGRESS'
|
||||
elif winId == 10152:
|
||||
return 'WINDOW_DIALOG_MEDIA_FILTER'
|
||||
elif winId == 10153:
|
||||
return 'WINDOW_DIALOG_SUBTITLES'
|
||||
elif winId == 10156:
|
||||
return 'WINDOW_DIALOG_KEYBOARD_TOUCH'
|
||||
elif winId == 10157:
|
||||
return 'WINDOW_DIALOG_CMS_OSD_SETTINGS'
|
||||
elif winId == 10158:
|
||||
return 'WINDOW_DIALOG_INFOPROVIDER_SETTINGS'
|
||||
elif winId == 10159:
|
||||
return 'WINDOW_DIALOG_SUBTITLE_OSD_SETTINGS'
|
||||
elif winId == 10160:
|
||||
return 'WINDOW_DIALOG_BUSY_NOCANCEL'
|
||||
|
||||
elif winId == 10500:
|
||||
return 'WINDOW_MUSIC_PLAYLIST'
|
||||
elif winId == 10502:
|
||||
return 'WINDOW_MUSIC_NAV'
|
||||
elif winId == 10503:
|
||||
return 'WINDOW_MUSIC_PLAYLIST_EDITOR'
|
||||
|
||||
elif winId == 10550:
|
||||
return 'WINDOW_DIALOG_OSD_TELETEXT'
|
||||
|
||||
# PVR related Window and Dialog ID's
|
||||
|
||||
elif 10600 < winId < 10613:
|
||||
return 'WINDOW_DIALOG_PVR'
|
||||
|
||||
|
||||
elif 10700 < winId < 10711:
|
||||
return 'WINDOW_PVR_ID'
|
||||
|
||||
# virtual windows for PVR specific keymap bindings in fullscreen playback
|
||||
elif winId == 10800:
|
||||
return 'WINDOW_FULLSCREEN_LIVETV'
|
||||
elif winId == 10801:
|
||||
return 'WINDOW_FULLSCREEN_RADIO'
|
||||
elif winId == 10802:
|
||||
return 'WINDOW_FULLSCREEN_LIVETV_PREVIEW'
|
||||
elif winId == 10803:
|
||||
return 'WINDOW_FULLSCREEN_RADIO_PREVIEW'
|
||||
elif winId == 10804:
|
||||
return 'WINDOW_FULLSCREEN_LIVETV_INPUT'
|
||||
elif winId == 10805:
|
||||
return 'WINDOW_FULLSCREEN_RADIO_INPUT'
|
||||
|
||||
elif winId == 10820:
|
||||
return 'WINDOW_DIALOG_GAME_CONTROLLERS'
|
||||
elif winId == 10821:
|
||||
return 'WINDOW_GAMES'
|
||||
elif winId == 10822:
|
||||
return 'WINDOW_DIALOG_GAME_OSD'
|
||||
elif winId == 10823:
|
||||
return 'WINDOW_DIALOG_GAME_VIDEO_FILTER'
|
||||
elif winId == 10824:
|
||||
return 'WINDOW_DIALOG_GAME_STRETCH_MODE'
|
||||
elif winId == 10825:
|
||||
return 'WINDOW_DIALOG_GAME_VOLUME'
|
||||
elif winId == 10826:
|
||||
return 'WINDOW_DIALOG_GAME_ADVANCED_SETTINGS'
|
||||
elif winId == 10827:
|
||||
return 'WINDOW_DIALOG_GAME_VIDEO_ROTATION'
|
||||
elif 11100 < winId < 11199:
|
||||
return 'SKIN' # WINDOW_ID's from 11100 to 11199 reserved for Skins
|
||||
|
||||
elif winId == 12000:
|
||||
return 'WINDOW_DIALOG_SELECT'
|
||||
elif winId == 12001:
|
||||
return 'WINDOW_DIALOG_MUSIC_INFO'
|
||||
elif winId == 12002:
|
||||
return 'WINDOW_DIALOG_OK'
|
||||
elif winId == 12003:
|
||||
return 'WINDOW_DIALOG_VIDEO_INFO'
|
||||
elif winId == 12005:
|
||||
return 'WINDOW_FULLSCREEN_VIDEO'
|
||||
elif winId == 12006:
|
||||
return 'WINDOW_VISUALISATION'
|
||||
elif winId == 12007:
|
||||
return 'WINDOW_SLIDESHOW'
|
||||
elif winId == 12600:
|
||||
return 'WINDOW_WEATHER'
|
||||
elif winId == 12900:
|
||||
return 'WINDOW_SCREENSAVER'
|
||||
elif winId == 12901:
|
||||
return 'WINDOW_DIALOG_VIDEO_OSD'
|
||||
|
||||
elif winId == 12902:
|
||||
return 'WINDOW_VIDEO_MENU'
|
||||
elif winId == 12905:
|
||||
return 'WINDOW_VIDEO_TIME_SEEK' # virtual window for time seeking during fullscreen video
|
||||
|
||||
elif winId == 12906:
|
||||
return 'WINDOW_FULLSCREEN_GAME'
|
||||
|
||||
elif winId == 12997:
|
||||
return 'WINDOW_SPLASH' # splash window
|
||||
elif winId == 12998:
|
||||
return 'WINDOW_START' # first window to load
|
||||
elif winId == 12999:
|
||||
return 'WINDOW_STARTUP_ANIM' # for startup animations
|
||||
|
||||
elif 13000 < winId < 13099:
|
||||
return 'PYTHON' # WINDOW_ID's from 13000 to 13099 reserved for Python
|
||||
|
||||
elif 14000 < winId < 14099:
|
||||
return 'ADDON' # WINDOW_ID's from 14000 to 14099 reserved for Addons
|
||||
|
||||
|
||||
def play_video(item, strm=False, force_direct=False, autoplay=False):
|
||||
logger.debug()
|
||||
logger.debug(item.tostring('\n'))
|
||||
@@ -762,12 +1032,6 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
|
||||
|
||||
# if it is a video in mpd format, the listitem is configured to play it ith the inpustreamaddon addon implemented in Kodi 17
|
||||
# from core.support import dbg;dbg()
|
||||
if item.manifest == 'hls':
|
||||
if not install_inputstream():
|
||||
return
|
||||
xlistitem.setProperty('inputstream' if PY3 else 'inputstreamaddon', 'inputstream.adaptive')
|
||||
xlistitem.setProperty('inputstream.adaptive.manifest_type', 'hls')
|
||||
xlistitem.setMimeType('application/x-mpegURL')
|
||||
if mpd or item.manifest =='mpd':
|
||||
if not install_inputstream():
|
||||
return
|
||||
@@ -778,6 +1042,12 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
|
||||
xlistitem.setProperty("inputstream.adaptive.license_type", item.drm)
|
||||
xlistitem.setProperty("inputstream.adaptive.license_key", item.license)
|
||||
xlistitem.setMimeType('application/dash+xml')
|
||||
elif item.manifest == 'hls' or mediaurl.split('|')[0].endswith('m3u8'):
|
||||
if not install_inputstream():
|
||||
return
|
||||
xlistitem.setProperty('inputstream' if PY3 else 'inputstreamaddon', 'inputstream.adaptive')
|
||||
xlistitem.setProperty('inputstream.adaptive.manifest_type', 'hls')
|
||||
xlistitem.setMimeType('application/x-mpegURL')
|
||||
|
||||
if force_direct: item.window = True
|
||||
|
||||
|
||||
@@ -6505,5 +6505,21 @@ msgid "Downloading..."
|
||||
msgstr ""
|
||||
|
||||
msgctxt "#90001"
|
||||
msgid "KOD options"
|
||||
msgstr "KOD options..."
|
||||
|
||||
msgctxt "#90002"
|
||||
msgid "No TMDB found"
|
||||
msgstr "No TmdbId found, cannot continue"
|
||||
|
||||
msgctxt "#90003"
|
||||
msgid "Already on KOD, continue searching on other channels?"
|
||||
msgstr "Item is coming from KOD, continue searching on other channels?"
|
||||
|
||||
msgctxt "#90004"
|
||||
msgid "No contextmenu option"
|
||||
msgstr "No options"
|
||||
|
||||
msgctxt "#90005"
|
||||
msgid "Search on KOD"
|
||||
msgstr "Search on KOD..."
|
||||
msgstr "Search with KOD"
|
||||
|
||||
@@ -6505,7 +6505,22 @@ msgctxt "#80050"
|
||||
msgid "Downloading..."
|
||||
msgstr "Download in corso..."
|
||||
|
||||
|
||||
msgctxt "#90001"
|
||||
msgid "KOD options"
|
||||
msgstr "Opzioni di KOD..."
|
||||
|
||||
msgctxt "#90002"
|
||||
msgid "No TMDB found"
|
||||
msgstr "Non sono riuscito a trovare le informazioni su TMDB"
|
||||
|
||||
msgctxt "#90003"
|
||||
msgid "Already on KOD, continue searching on other channels?"
|
||||
msgstr "Preferisci cercare su altri canali?"
|
||||
|
||||
msgctxt "#90004"
|
||||
msgid "No contextmenu option"
|
||||
msgstr "Nessuna opzione possibile"
|
||||
|
||||
msgctxt "#90005"
|
||||
msgid "Search on KOD"
|
||||
msgstr "Cerca con KOD..."
|
||||
msgstr "Cerca con KOD"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"active": true,
|
||||
"active": false,
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "dood(?:stream|).[^/]+/((?:e|d)/[a-z0-9]+)",
|
||||
"url": "https://dood.to/\\1"
|
||||
"pattern": "dood(?:stream)?.[^/]+/(?:e|d)/([a-z0-9]+)",
|
||||
"url": "https://dood.to/e/\\1"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,39 +1,41 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import re, time
|
||||
from lib import js2py
|
||||
from core import httptools, scrapertools
|
||||
import time, string, random
|
||||
from core import httptools, support
|
||||
from platformcode import logger, config
|
||||
import cloudscraper
|
||||
scraper = cloudscraper.create_scraper()
|
||||
|
||||
def test_video_exists(page_url):
|
||||
global data
|
||||
logger.debug('page url=', page_url)
|
||||
response = httptools.downloadpage(page_url)
|
||||
|
||||
if response.code == 404 or 'File you are looking for is not found' in response.data:
|
||||
response = scraper.get(page_url)
|
||||
|
||||
if response.status_code == 404 or 'File you are looking for is not found' in response.text:
|
||||
return False, config.get_localized_string(70449) % 'DooD Stream'
|
||||
else:
|
||||
data = response.data
|
||||
data = response.text
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
# from core.support import dbg;dbg()
|
||||
global data
|
||||
logger.debug("URL", page_url)
|
||||
# from core.support import dbg;dbg()
|
||||
|
||||
video_urls = []
|
||||
host = scrapertools.find_single_match(page_url, r'http[s]?://[^/]+')
|
||||
host = 'https://dood.to'
|
||||
headers = {'User-Agent': httptools.get_user_agent(), 'Referer': page_url}
|
||||
|
||||
new_url = scrapertools.find_single_match(data, r'<iframe src="([^"]+)"')
|
||||
if new_url: data = httptools.downloadpage(host + new_url).data
|
||||
match = support.match(data, patron=r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)''').match
|
||||
if match:
|
||||
url, token = match
|
||||
ret = scraper.get(host + url, headers=headers).text
|
||||
video_urls.append(['mp4 [DooD Stream]', '{}{}{}{}|Referer={}'.format(randomize(ret), url, token, int(time.time() * 1000), host)])
|
||||
|
||||
label = scrapertools.find_single_match(data, r'type:\s*"video/([^"]+)"')
|
||||
|
||||
logger.debug(data)
|
||||
|
||||
base_url, token = scrapertools.find_single_match(data, r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)''')
|
||||
url = '{}{}{}|Referer={}'.format(httptools.downloadpage(host + base_url, headers={"Referer": page_url}).data, token, str(int(time.time() * 1000)), page_url)
|
||||
video_urls.append([ label + ' [DooD Stream]', url])
|
||||
|
||||
return video_urls
|
||||
|
||||
def randomize(data):
|
||||
t = string.ascii_letters + string.digits
|
||||
return data + ''.join([random.choice(t) for _ in range(10)])
|
||||
@@ -10,11 +10,14 @@ def test_video_exists(page_url):
|
||||
global data
|
||||
|
||||
# page_url = re.sub('://[^/]+/', '://feurl.com/', page_url)
|
||||
data = httptools.downloadpage(page_url).data
|
||||
page = httptools.downloadpage(page_url)
|
||||
data = page.data
|
||||
if "Sorry 404 not found" in data or "This video is unavailable" in data or "Sorry this video is unavailable:" in data:
|
||||
return False, config.get_localized_string(70449) % "fembed"
|
||||
page_url = page_url.replace("/f/","/v/")
|
||||
page_url = page_url.replace("/v/","/api/source/")
|
||||
|
||||
page_url = page.url
|
||||
page_url = page_url.replace("/f/", "/v/")
|
||||
page_url = page_url.replace("/v/", "/api/source/")
|
||||
data = httptools.downloadpage(page_url, post={}).json
|
||||
logger.debug(data)
|
||||
if "Video not found or" in data or "We are encoding this video" in data:
|
||||
|
||||
42
servers/hxfile.json
Normal file
42
servers/hxfile.json
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
"active": true,
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "https?://hxfile.co/(?!api)(?:embed-)?([A-z0-9]+)",
|
||||
"url": "https://hxfile.co/embed-\\1.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
"free": true,
|
||||
"id": "hxfile",
|
||||
"name": "HxFile",
|
||||
"settings": [
|
||||
{
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"id": "black_list",
|
||||
"label": "@70708",
|
||||
"type": "bool",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"id": "favorites_servers_list",
|
||||
"label": "@60655",
|
||||
"lvalues": [
|
||||
"No",
|
||||
"1",
|
||||
"2",
|
||||
"3",
|
||||
"4",
|
||||
"5"
|
||||
],
|
||||
"type": "list",
|
||||
"visible": false
|
||||
}
|
||||
],
|
||||
"thumbnail": "hxfile.png"
|
||||
}
|
||||
25
servers/hxfile.py
Normal file
25
servers/hxfile.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from core import httptools, scrapertools, servertools, support
|
||||
from platformcode import logger, config
|
||||
from lib import jsunpack
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.debug("(page_url='%s')" % page_url)
|
||||
global data
|
||||
data = httptools.downloadpage(page_url).data
|
||||
if "Can't create video code" in data:
|
||||
return False, config.get_localized_string(70292) % 'HxFile'
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.debug("url=" + page_url)
|
||||
global data
|
||||
video_urls = []
|
||||
packed = scrapertools.find_single_match(data, r'(eval\s?\(function\(p,a,c,k,e,d\).*?\n)')
|
||||
data = jsunpack.unpack(packed)
|
||||
video_urls.extend(support.get_jwplayer_mediaurl(data, 'HxFile'))
|
||||
|
||||
return video_urls
|
||||
@@ -48,15 +48,15 @@ def test_video_exists(page_url):
|
||||
-17: 'The request exceeds your allowable transfer fee',
|
||||
-18: types + ' temporarily unavailable, please try again later'
|
||||
}
|
||||
api = 'https://g.api.mega.co.nz/cs?id=%d%s' % (seqno, get)
|
||||
req_api = httptools.downloadpage(api, post=json.dumps([post])).data
|
||||
api = 'https://g.api.mega.co.nz/cs?id={}{}'.format(seqno, get)
|
||||
req_api = httptools.downloadpage(api, post=json.dumps([post])).json
|
||||
if isfolder:
|
||||
req_api = json.loads(req_api)
|
||||
req_api = req_api
|
||||
else:
|
||||
try:
|
||||
req_api = json.loads(req_api)[0]
|
||||
req_api = req_api[0]
|
||||
except:
|
||||
req_api = json.loads(req_api)
|
||||
req_api = req_api
|
||||
logger.error(req_api)
|
||||
if isinstance(req_api, (int, long)):
|
||||
if req_api in codes:
|
||||
@@ -82,8 +82,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
logger.debug("(page_url='%s')" % page_url)
|
||||
video_urls = []
|
||||
|
||||
# si hay mas de 5 archivos crea un playlist con todos
|
||||
# Esta función (la de la playlist) no va, hay que ojear megaserver/handler.py aunque la llamada este en client.py
|
||||
# If there are more than 5 files create a playlist with all
|
||||
# This function (the playlist) does not go, you have to browse megaserver / handler.py although the call is in client.py
|
||||
if len(files) > 5:
|
||||
media_url = c.get_play_list()
|
||||
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [mega]", media_url])
|
||||
|
||||
@@ -4,8 +4,12 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "mixdrop.[^/]+/(?:f|e)/([a-z0-9]+)",
|
||||
"pattern": "mixdrop[s]?.[^/]+/(?:f|e)/([a-z0-9]+)",
|
||||
"url": "https://mixdrop.co/e/\\1"
|
||||
},
|
||||
{
|
||||
"pattern": "(mixdrop[s]?.[^/]+/player\\.php\\?id=[a-z0-9-]+)",
|
||||
"url": "https://\\1"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"active": true,
|
||||
"active": false,
|
||||
"find_videos": {
|
||||
"ignore_urls": ["https://embed.mystream.to/span"],
|
||||
"patterns": [
|
||||
|
||||
@@ -19,18 +19,27 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
global data
|
||||
logger.debug("URL", page_url)
|
||||
video_urls = []
|
||||
# support.dbg()
|
||||
|
||||
h = json.loads(support.match(data, patron='stream="([^"]+)"').match.replace('"','"'))
|
||||
baseurl = decode(h['host']) + h['hash']
|
||||
matches = support.match(baseurl + '/index.m3u8', patron=r'RESOLUTION=\d+x(\d+)\s*([^\s]+)').matches
|
||||
headers = {'User-Agent': httptools.get_user_agent(),
|
||||
'Referer': page_url,
|
||||
'Origin': 'https://ninjastream.to',
|
||||
'X-Requested-With': 'XMLHttpRequest'}
|
||||
|
||||
for quality, url in matches:
|
||||
video_urls.append(["{} {}p [NinjaStream]".format(url.split('.')[-1], quality), '{}/{}'.format(baseurl, url)])
|
||||
apiUrl = 'https://ninjastream.to/api/video/get'
|
||||
post = {'id':page_url.split('/')[-1]}
|
||||
data = httptools.downloadpage(apiUrl, headers=headers, post=post).json
|
||||
|
||||
if data.get('result',{}).get('playlist'):
|
||||
# support.dbg()
|
||||
url = data.get('result',{}).get('playlist')
|
||||
|
||||
video_urls.append([url.split('.')[-1], url + '|Referer:' + page_url])
|
||||
|
||||
return video_urls
|
||||
|
||||
def decode(host):
|
||||
Host = ''
|
||||
for n in range(len(host)):
|
||||
Host += chr(ord(host[n]) ^ ord('2'))
|
||||
return Host
|
||||
# def decode(host):
|
||||
# Host = ''
|
||||
# for n in range(len(host)):
|
||||
# Host += chr(ord(host[n]) ^ ord('2'))
|
||||
# return Host
|
||||
@@ -4,7 +4,7 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "okstream.cc/([0-9a-zA-Z]+)",
|
||||
"pattern": "okstream.cc/(?:e)?/([0-9a-zA-Z]+)",
|
||||
"url": "https://www.okstream.cc/e/\\1"
|
||||
}
|
||||
]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core import httptools, support
|
||||
from platformcode import logger, config
|
||||
|
||||
def test_video_exists(page_url):
|
||||
@@ -8,7 +8,7 @@ def test_video_exists(page_url):
|
||||
logger.debug('page url=', page_url)
|
||||
response = httptools.downloadpage(page_url)
|
||||
|
||||
if response.code == 404:
|
||||
if response.code == 404 or 'File has been removed or does not exist!' in response.data:
|
||||
return False, config.get_localized_string(70449) % 'OkStream'
|
||||
else:
|
||||
data = response.data
|
||||
@@ -19,8 +19,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
global data
|
||||
logger.debug("URL", page_url)
|
||||
video_urls = []
|
||||
keys = scrapertools.find_single_match(data, '>var keys="([^"]+)"')
|
||||
protection = scrapertools.find_single_match(data, '>var protection="([^"]+)"')
|
||||
keys = support.match(data, patron=r'>var keys="([^"]+)"').match
|
||||
protection = support.match(data, patron=r'>var protection="([^"]+)"').match
|
||||
url = httptools.downloadpage("https://www.okstream.cc/request/", post='&morocco={}&mycountry={}'.format(keys, protection), headers={'Referer':page_url}).data
|
||||
url = url.strip()
|
||||
video_urls.append([url.split('.')[-1] + " [OkStream]", url])
|
||||
|
||||
43
servers/playtube.json
Normal file
43
servers/playtube.json
Normal file
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"active": true,
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "playtube.ws/(?:embed-|)(\\w+)",
|
||||
"url": "https://playtube.ws/embed-\\1.html"
|
||||
}
|
||||
|
||||
]
|
||||
},
|
||||
"free": true,
|
||||
"id": "playtube",
|
||||
"name": "PlayTube",
|
||||
"settings": [
|
||||
{
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"id": "black_list",
|
||||
"label": "@60654",
|
||||
"type": "bool",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"id": "favorites_servers_list",
|
||||
"label": "@60655",
|
||||
"lvalues": [
|
||||
"No",
|
||||
"1",
|
||||
"2",
|
||||
"3",
|
||||
"4",
|
||||
"5"
|
||||
],
|
||||
"type": "list",
|
||||
"visible": false
|
||||
}
|
||||
],
|
||||
"thumbnail": "playtube.png"
|
||||
}
|
||||
29
servers/playtube.py
Normal file
29
servers/playtube.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# --------------------------------------------------------
|
||||
# Conector playtube By Alfa development Group
|
||||
# --------------------------------------------------------
|
||||
import re
|
||||
import codecs
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from lib import jsunpack
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
global data
|
||||
data = httptools.downloadpage(page_url)
|
||||
if data.code == 404 or "File is no longer available" in data.data:
|
||||
return False, config.get_localized_string(70449) % 'PlayTube'
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("url=" + page_url)
|
||||
video_urls = []
|
||||
pack = scrapertools.find_single_match(data.data, 'p,a,c,k,e,d.*?</script>')
|
||||
unpacked = jsunpack.unpack(pack)
|
||||
url = scrapertools.find_single_match(unpacked, 'file:"([^"]+)') + "|referer=%s" %(page_url)
|
||||
video_urls.append(['m3u8 [PlayTube]', url] )
|
||||
return video_urls
|
||||
@@ -4,16 +4,19 @@
|
||||
from core import httptools, scrapertools
|
||||
from platformcode import config, logger
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
logger.debug("(page_url='%s')" % page_url)
|
||||
|
||||
data = httptools.downloadpage(page_url).data
|
||||
|
||||
if "File was deleted" in data or "Video is transfer on streaming server now." in data:
|
||||
if "File was deleted" in data or "Video is transfer on streaming server now." in data \
|
||||
or 'Conversione video in corso' in data:
|
||||
return False, config.get_localized_string(70449) % "Speedvideo"
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.debug("url=" + page_url)
|
||||
video_urls = []
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "https?://vidmoly.net/(?:embed-)?(\\w+)\\.html",
|
||||
"pattern": "https?://vidmoly.(?:net|to)/(?:embed-)?(\\w+)\\.html",
|
||||
"url": "https://vidmoly.net/embed-\\1.html"
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"active": true,
|
||||
"active": false,
|
||||
"find_videos": {
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "(?:vupload.com|vup.to)/((?:embed-)?[a-z0-9]+)",
|
||||
"url": "https://vupload.com/\\1.html"
|
||||
"pattern": "(?:vupload.com|vup.to)/((?:embed-|e/)?[a-z0-9]+)",
|
||||
"url": "https://vupload.com/\\1"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "wstream",
|
||||
"name": "Wstream",
|
||||
"active": true,
|
||||
"active": false,
|
||||
"free": true,
|
||||
|
||||
"find_videos": {
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "(https://youdbox.com/embed-[A-z0-9-]+.html)",
|
||||
"url": "\\1"
|
||||
"pattern": "https://youdbox.(?:com|net)/embed-([A-z0-9-]+.html)",
|
||||
"url": "https://youdbox.net/embed-\\1"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -37,5 +37,6 @@
|
||||
"type": "list",
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
],
|
||||
"thumbnail": "youdbox.png"
|
||||
}
|
||||
|
||||
@@ -1,14 +1,29 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# import re
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from platformcode import logger
|
||||
from platformcode import logger, config
|
||||
import codecs
|
||||
|
||||
|
||||
def test_video_exists(page_url):
|
||||
global data
|
||||
data = httptools.downloadpage(page_url).data
|
||||
if 'File was deleted' in data:
|
||||
return False, config.get_localized_string(70449) % 'YouDbox'
|
||||
return True
|
||||
|
||||
|
||||
def get_video_url(page_url, video_password):
|
||||
logger.debug("(page_url='%s')" % page_url)
|
||||
logger.info("(page_url='%s')" % page_url)
|
||||
global data
|
||||
video_urls = []
|
||||
data = httptools.downloadpage(page_url).data
|
||||
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
|
||||
list = scrapertools.find_single_match(data, 'var [a-zA-Z0-9]+ = ([^\]]+)').replace('[', '').replace('"', '').replace('\\x', '').replace(',', ' ')
|
||||
list = list.split()[::-1]
|
||||
url =""
|
||||
for elem in list:
|
||||
decoded = codecs.decode(elem, "hex")
|
||||
url += decoded.decode("utf8")
|
||||
url = scrapertools.find_single_match(url, '<source src="([^"]+)"')
|
||||
video_urls.append(["[youdbox]", url])
|
||||
return video_urls
|
||||
|
||||
|
||||
@@ -23,13 +23,9 @@ from channelselector import get_thumb
|
||||
from platformcode import logger, config, platformtools, unify
|
||||
from core.support import typo, thumb
|
||||
import xbmcgui
|
||||
|
||||
import gc
|
||||
|
||||
import xbmc
|
||||
from threading import Thread
|
||||
from core.support import dbg
|
||||
gc.disable()
|
||||
|
||||
info_language = ["de", "en", "es", "fr", "it", "pt"] # from videolibrary.json
|
||||
def_lang = info_language[config.get_setting("info_language", "videolibrary")]
|
||||
@@ -38,7 +34,8 @@ def_lang = info_language[config.get_setting("info_language", "videolibrary")]
|
||||
def mainlist(item):
|
||||
logger.debug()
|
||||
|
||||
if config.get_setting('new_search'):
|
||||
if platformtools.get_window() not in ('WINDOW_SETTINGS_MENU', 'WINDOW_SETTINGS_INTERFACE', 'WINDOW_SKIN_SETTINGS')\
|
||||
and xbmc.getInfoLabel('System.CurrentWindow') in ('Home', '') and config.get_setting('new_search'):
|
||||
itemlist = [Item(channel='globalsearch', title=config.get_localized_string(70276), action='Search', mode='all', thumbnail=get_thumb("search.png"), folder=False),
|
||||
Item(channel='globalsearch', title=config.get_localized_string(70741) % config.get_localized_string(30122), action='Search', mode='movie', thumbnail=get_thumb("search_movie.png"),folder=False),
|
||||
Item(channel='globalsearch', title=config.get_localized_string(70741) % config.get_localized_string(30123), action='Search', mode='tvshow', thumbnail=get_thumb("search_tvshow.png"), folder=False),
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
from __future__ import division
|
||||
|
||||
# from builtins import str
|
||||
import random
|
||||
import sys
|
||||
|
||||
from channelselector import get_thumb
|
||||
@@ -199,6 +200,8 @@ def youtube_search(item):
|
||||
else:
|
||||
title = urllib.quote(title)
|
||||
title = title.replace("%20", "+")
|
||||
httptools.set_cookies({'domain': 'youtube.com', 'name': 'CONSENT',
|
||||
'value': 'YES+cb.20210328-17-p0.en+FX+' + str(random.randint(100, 999))})
|
||||
data = httptools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&search_query=" + title).data
|
||||
patron = r'thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?'
|
||||
patron += r'text":"([^"]+).*?'
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# from specials import videolibrary
|
||||
import os, sys, xbmc
|
||||
|
||||
try:
|
||||
import xbmcvfs
|
||||
xbmc.translatePath = xbmcvfs.translatePath
|
||||
xbmc.validatePath = xbmcvfs.validatePath
|
||||
xbmc.makeLegalFilename = xbmcvfs.makeLegalFilename
|
||||
except:
|
||||
pass
|
||||
|
||||
from platformcode import config, logger
|
||||
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
|
||||
sys.path.insert(0, librerias)
|
||||
|
||||
from core.item import Item
|
||||
from core import scrapertools
|
||||
from core.videolibrarydb import videolibrarydb
|
||||
from platformcode.xbmc_videolibrary import execute_sql_kodi
|
||||
from specials.videolibrary import update_videolibrary
|
||||
|
||||
|
||||
def search_id(Id):
|
||||
n, records = execute_sql_kodi('SELECT idPath FROM tvshowlinkpath WHERE idShow= {}'.format(Id))
|
||||
if records:
|
||||
n, records = execute_sql_kodi('SELECT strPath FROM path WHERE idPath= "{}"'.format(records[0][0]))
|
||||
if records:
|
||||
return scrapertools.find_single_match(records[0][0], r'\[(tt[^\]]+)')
|
||||
return ''
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
videolibrary_id = search_id(sys.listitem.getVideoInfoTag().getDbId())
|
||||
if videolibrary_id:
|
||||
tvshows_ids = list(videolibrarydb['tvshow'].keys())
|
||||
videolibrarydb.close()
|
||||
if videolibrary_id in tvshows_ids:
|
||||
item = Item(videolibrary_id=videolibrary_id)
|
||||
update_videolibrary(item)
|
||||
Reference in New Issue
Block a user