KoD 1.7.7

- routine fixes to channels/servers
This commit is contained in:
marco
2023-06-30 19:39:03 +02:00
parent c3e02636fb
commit d29efd4ec2
68 changed files with 1784 additions and 543 deletions

View File

@@ -1,28 +1,25 @@
name: Test Suite
on:
workflow_dispatch:
schedule:
- cron: '00 15 * * *'
jobs:
tests:
runs-on: ubuntu-latest
runs-on: macos-latest
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
uses: actions/setup-python@v4.3.0
with:
python-version: 3.8
python-version: 3.9
- name: Run tests
run: |
export KODI_INTERACTIVE=0
./tests/run.sh
- name: Commit & Push changes
uses: dmnemec/copy_file_to_another_repo_action@v1.0.4
uses: dmnemec/copy_file_to_another_repo_action@main
env:
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
with:

View File

@@ -1,4 +1,4 @@
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.7.6" provider-name="KoD Team">
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.7.7" provider-name="KoD Team">
<requires>
<!-- <import addon="script.module.libtorrent" optional="true"/> -->
<import addon="metadata.themoviedb.org"/>
@@ -28,9 +28,6 @@
<screenshot>resources/media/screenshot-3.png</screenshot>
</assets>
<news>- fix di routine ai canali/server
- disabilitati cb01anime e tantifilm
- aggiunta opzione &quot;mostra server&quot; nel menu contestuale della libreria
- più opzioni per quanto riguarda l'aggiornamento della videoteca
</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]

View File

@@ -16,7 +16,7 @@
"eurostreaming": "https://eurostreaming.credit",
"eurostreaming_actor": "https://eurostreaming.care",
"filmstreaming": "https://filmstreaming.sbs",
"guardaseriecam": "https://guardaserie.baby",
"guardaseriecam": "https://guardaserie.app",
"hd4me": "https://hd4me.net",
"ilcorsaronero": "https://ilcorsaronero.link",
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.sbs",

View File

@@ -115,7 +115,7 @@ def peliculas(item):
action = 'seasons'
patron = r'<img src="(?P<thumb>[^"]+)(?:[^>]+>){4}\s*<a href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)'
if (item.args == 'search' or item.contentType != 'movie') and inspect.stack(0)[4][3] not in ['get_channel_results']:
if (item.args == 'search' or item.contentType != 'movie') and not support.stackCheck(['get_channel_results']):
patronNext = None
def itemlistHook(itemlist):
lastUrl = support.match(data, patron=r'href="([^"]+)">Last').match

View File

@@ -4,14 +4,13 @@
# ------------------------------------------------------------
import cloudscraper, json, copy, inspect
from core import jsontools, support
from core import jsontools, support, httptools
from platformcode import autorenumber
session = cloudscraper.create_scraper()
# support.dbg()
host = support.config.get_channel_url()
response = session.get(host + '/archivio')
csrf_token = support.match(response.text, patron='name="csrf-token" content="([^"]+)"').match
response = httptools.downloadpage(host + '/archivio')
csrf_token = support.match(response.data, patron='name="csrf-token" content="([^"]+)"').match
headers = {'content-type': 'application/json;charset=UTF-8',
'x-csrf-token': csrf_token,
'Cookie' : '; '.join([x.name + '=' + x.value for x in response.cookies])}
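A rough standalone sketch of the handshake these new lines perform, with plain requests standing in for the project's httptools wrapper (the host below is a placeholder, not the real channel URL):

import re, requests

host = 'https://example-streaming.host'                       # placeholder host
resp = requests.get(host + '/archivio')
csrf_token = re.search(r'name="csrf-token" content="([^"]+)"', resp.text).group(1)
headers = {
    'content-type': 'application/json;charset=UTF-8',
    'x-csrf-token': csrf_token,
    # forward the session cookies exactly as the channel does
    'Cookie': '; '.join('{}={}'.format(c.name, c.value) for c in resp.cookies),
}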
@@ -153,7 +152,7 @@ def peliculas(item):
item.args['order'] = order_list[order]
payload = json.dumps(item.args)
records = session.post(host + '/archivio/get-animes', headers=headers, data=payload).json()['records']
records = httptools.downloadpage(host + '/archivio/get-animes', headers=headers, post=payload).json['records']
for it in records:
if not it['title']:
it['title'] = ''
@@ -225,8 +224,28 @@ def episodios(item):
def findvideos(item):
itemlist = [item.clone(title='StreamingCommunityWS', server='streamingcommunityws', url=str(item.scws_id))]
# itemlist = [item.clone(title='StreamingCommunityWS', server='streamingcommunityws', url=str(item.scws_id)),
# item.clone(title=support.config.get_localized_string(30137), server='directo', url=item.video_url)]
return support.server(item, itemlist=itemlist, referer=False)
if item.scws_id:
from time import time
from base64 import b64encode
from hashlib import md5
client_ip = support.httptools.downloadpage('http://ip-api.com/json/').json.get('query')
expires = int(time() + 172800)
token = b64encode(md5('{}{} Yc8U6r8KjAKAepEA'.format(expires, client_ip).encode('utf-8')).digest()).decode('utf-8').replace('=', '').replace('+', '-').replace('/', '_')
url = 'https://scws.work/master/{}?token={}&expires={}&n=1'.format(item.scws_id, token, expires)
itemlist = [item.clone(title=support.config.get_localized_string(30137), url=url, server='directo', action='play')]
return support.server(item, itemlist=itemlist)
def play(item):
urls = list()
info = support.match(item.url, patron=r'(http.*?rendition=(\d+)[^\s]+)').matches
if info:
for url, res in info:
urls.append(['hls [{}]'.format(res), url])
return urls
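The new findvideos builds a pre-signed HLS master URL. A minimal sketch of the same token scheme in isolation (salt, expiry window and URL layout are copied from the lines above; build_scws_url is a hypothetical helper name):

from time import time
from base64 import b64encode
from hashlib import md5

def build_scws_url(scws_id, client_ip, ttl=172800, salt='Yc8U6r8KjAKAepEA'):
    expires = int(time() + ttl)                                # 48-hour validity
    digest = md5('{}{} {}'.format(expires, client_ip, salt).encode('utf-8')).digest()
    # URL-safe base64 without padding, as the player endpoint expects
    token = b64encode(digest).decode('utf-8').rstrip('=').replace('+', '-').replace('/', '_')
    return 'https://scws.work/master/{}?token={}&expires={}&n=1'.format(scws_id, token, expires)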

View File

@@ -7,6 +7,15 @@
"banner": "animeworld.png",
"categories": ["anime", "vos"],
"settings": [
{
"id": "lang",
"type": "list",
"label": "Lingua di Ricerca",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Tutte", "Ita", "Sub-Ita"]
},
{
"id": "order",
"type": "list",

View File

@@ -4,7 +4,7 @@
# thanks to fatshotty
# ----------------------------------------------------------
from core import httptools, support, jsontools
from core import httptools, support, config, jsontools
host = support.config.get_channel_url()
__channel__ = 'animeworld'
@@ -23,7 +23,7 @@ def get_data(item):
# support.dbg()
url = httptools.downloadpage(item.url, headers=headers, follow_redirects=True, only_headers=True).url
data = support.match(url, headers=headers, follow_redirects=True).data
if 'AWCookieVerify' in data:
if 'SecurityAW' in data:
get_cookie(data)
data = get_data(item)
return data
@@ -37,8 +37,8 @@ def order():
@support.menu
def mainlist(item):
anime=['/filter?sort=',
('ITA',['/filter?dub=1&sort=', 'menu', '1']),
('SUB-ITA',['/filter?dub=0&sort=', 'menu', '0']),
('ITA',['/filter?dub=1&sort=', 'menu', 'dub=1']),
('SUB-ITA',['/filter?dub=0&sort=', 'menu', 'dub=0']),
('In Corso', ['/ongoing', 'peliculas','noorder']),
('Ultimi Episodi', ['/updated', 'peliculas', 'updated']),
('Nuove Aggiunte',['/newest', 'peliculas','noorder' ]),
@@ -50,6 +50,7 @@ def mainlist(item):
def genres(item):
action = 'peliculas'
data = get_data(item)
patronBlock = r'dropdown[^>]*>\s*Generi\s*<span.[^>]+>(?P<block>.*?)</ul>'
patronMenu = r'<input.*?name="(?P<name>[^"]+)" value="(?P<value>[^"]+)"\s*>[^>]+>(?P<title>[^<]+)</label>'
@@ -75,9 +76,10 @@ def menu(item):
def submenu(item):
action = 'peliculas'
data = item.other
# debug=True
patronMenu = r'<input.*?name="(?P<name>[^"]+)" value="(?P<value>[^"]+)"\s*>[^>]+>(?P<title>[^<]+)<\/label>'
def itemHook(item):
item.url = host + '/filter?' + item.name + '=' + item.value + '&dub=' + item.args + ('&sort=' if item.name != 'sort' else '')
item.url = '{}/filter?{}={}&{}{}'.format(host, item.name, item.value, item.args, ('&sort=' if item.name != 'sort' else ''))
return item
return locals()
@@ -85,9 +87,10 @@ def submenu(item):
def newest(categoria):
support.info(categoria)
item = support.Item()
lang = config.get_setting('lang', channel=item.channel)
try:
if categoria == "anime":
item.url = host + '/updated'
item.url = host
item.args = "updated"
return peliculas(item)
# Continua la ricerca in caso di errore
@@ -98,13 +101,13 @@ def newest(categoria):
return []
def search(item, texto):
support.info(texto)
def search(item, text):
support.info(text)
if item.search:
item.url = host + '/filter?dub=' + item.args + '&keyword=' + texto + '&sort='
item.url = '{}/filter?{}&keyword={}&sort='.format(host, item.args, text)
else:
item.args = 'noorder'
item.url = host + '/search?keyword=' + texto
lang = ['?', '?dub=1&', '?dub=0&'][config.get_setting('lang', channel=item.channel)]
item.url = '{}/filter{}&keyword={}&sort='.format(host, lang, text)
item.contentType = 'tvshow'
try:
return peliculas(item)
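How the new "lang" setting feeds the search URL, as a small self-contained sketch (indices follow the lvalues order Tutte/Ita/Sub-Ita; the host is a placeholder):

lang_filters = ['?', '?dub=1&', '?dub=0&']   # Tutte, Ita, Sub-Ita

def build_search_url(host, text, lang_index):
    return '{}/filter{}&keyword={}&sort='.format(host, lang_filters[lang_index], text)

print(build_search_url('https://example-anime.host', 'naruto', 1))
# https://example-anime.host/filter?dub=1&&keyword=naruto&sort=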
@@ -118,8 +121,8 @@ def search(item, texto):
@support.scrape
def peliculas(item):
data = get_data(item)
anime = True
# debug = True
if item.args not in ['noorder', 'updated'] and not item.url[-1].isdigit(): item.url += order() # usa l'ordinamento di configura canale
data = get_data(item)
@@ -185,7 +188,9 @@ def findvideos(item):
else:
dataJson = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).data
json = jsontools.load(dataJson)
title = support.match(json['grabber'], patron=r'server\d+.([^.]+)', string=True).match
if title: itemlist.append(item.clone(action="play", title=title, url=json['grabber'].split('=')[-1], server='directo'))
else: urls.append(json['grabber'])
# support.info(urls)
return support.server(item, urls, itemlist)

View File

@@ -34,4 +34,4 @@
"lvalues": ["10", "20", "30", "40", "50", "60", "80", "90"]
}
]
}
}

View File

@@ -45,9 +45,9 @@ def peliculas(item):
@support.scrape
def episodios(item):
patronBlock = r'<div class="tab-pane fade" id="season-(?P<season>.)"(?P<block>.*?)</ul>\s*</div>'
patron = r'<a href="#" allowfullscreen data-link="(?P<url>[^"]+).*?title="(?P<title>[^"]+)(?P<lang>[sS][uU][bB]-?[iI][tT][aA])?\s*">(?P<episode>[^<]+)'
patron = r'(?P<data><a href="#" allowfullscreen data-link="[^"]+.*?title="(?P<title>[^"]+)(?P<lang>[sS][uU][bB]-?[iI][tT][aA])?\s*">(?P<episode>[^<]+).*?</li>)'
action = 'findvideos'
# debugBlock = True
# debug = True
return locals()
@@ -68,4 +68,4 @@ def search(item, text):
def findvideos(item):
logger.debug()
return support.server(item, item.url)
return support.server(item, item.data)

View File

@@ -57,7 +57,9 @@ def peliculas(item):
def itemHook(item):
if not sceneTitle:
item.title = item.title.replace('_', ' ')
item.fulltitle = item.fulltitle.replace('_', ' ')
item.title = support.scrapertools.decodeHtmlentities(support.urlparse.unquote(item.title))
return item
if 'search' not in item.args:

View File

@@ -6,23 +6,22 @@
from core import support, httptools
from platformcode import config
host = config.get_channel_url()
headers = [['Referer', host]]
host = config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
menu = [
('Film', ['/film/', 'list', 'film']),
('Per Genere', ['', 'list', 'genere']),
('Al Cinema', ['/cinema/', 'list', 'film']),
('Sottotitolati', ['/sub-ita/', 'list', 'film']),
('Top del Mese', ['/top-del-mese.html', 'list', 'film'])
menu = [
('Film', ['/film/', 'list', 'film']),
('Per Genere', ['', 'list', 'genere']),
('Al Cinema', ['/cinema/', 'list', 'film']),
('Sub-ITA', ['/sub-ita/', 'list', 'film']),
('Top del Mese', ['/top-del-mese.html', 'list', 'film'])
]
search = ''
return locals()
return locals()
@support.scrape
@@ -64,7 +63,7 @@ def findvideos(item):
urls = []
data = support.match(item).data
matches = support.match(data, patron=r'<iframe.*?src="([^"]+)').matches
for m in matches:
if 'youtube' not in m and not m.endswith('.js'):
urls += support.match(m, patron=r'data-link="([^"]+)').matches

View File

@@ -4,7 +4,8 @@
# ------------------------------------------------------------
import requests
from core import support
from core import support, httptools
from platformcode import logger
DRM = 'com.widevine.alpha'
key_widevine = "https://la7.prod.conax.cloud/widevine/license"
@@ -27,7 +28,7 @@ def mainlist(item):
('Replay {bold}', ['', 'replay_channels'])]
menu = [('Programmi TV {bullet bold}', ['/tutti-i-programmi', 'peliculas', '', 'tvshow']),
('Teche La7 {bullet bold}', ['/i-protagonisti', 'peliculas', '', 'tvshow'])]
('Teche La7 {bullet bold}', ['/la7teche', 'peliculas', '', 'tvshow'])]
search = ''
return locals()
@@ -83,13 +84,18 @@ def search(item, text):
@support.scrape
def peliculas(item):
search = item.search
action = 'episodios'
pagination = 20
disabletmdb = True
addVideolibrary = False
downloadEnabled = False
action = 'episodios'
patron = r'<a href="(?P<url>[^"]+)"[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]+)"></div><div class="titolo">\s*(?P<title>[^<]+)<'
if 'la7teche' in item.url:
patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"]+)" class="teche-i-img".*?url\(\'(?P<thumb>[^\']+)'
def itemHook(item):
item.thumbnail = 'http:' + item.t if item.t.startswith('//') else item.t if item.t else item.thumbnail
item.fanart = item.thumb
return item
return locals()
@@ -97,33 +103,18 @@ def peliculas(item):
@support.scrape
def episodios(item):
data = support.match(item).data
# debug = True
action = 'findvideos'
if '>puntate<' in data:
patronBlock = r'>puntate<(?P<block>.*?)home-block-outbrain'
url = support.match(data, patron=r'>puntate<[^>]+>[^>]+>[^>]+><a href="([^"]+)"').match
data += support.match(host + url).data
else:
item.url += '/video'
data = support.match(item).data
patron = r'(?:<a href="(?P<url>[^"]+)">[^>]+><div class="[^"]+" data-background-image="(?P<t>[^"]*)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?(?:[^>]+>){6}?)\s*(?P<title>[^<]+)<(?:[^>]+>[^>]+>[^>]+><div class="data">(?P<date>[^<]+))?|class="heading">[^>]+>(?P<Title>[^<]+).*?window.shareUrl = "(?P<Url>[^"]+)".*?poster:\s*"(?P<Thumb>[^"]+)", title: "(?P<desc>[^"]+)"'
patronNext = r'<a href="([^"]+)">'
addVideolibrary = False
downloadEnabled = False
def itemHook(item):
if item.Thumb: item.t = item.Thumb
item.thumbnail = 'http:' + item.t if item.t.startswith('//') else item.t if item.t else item.thumbnail
if item.Title: item.title = support.typo(item.Title, 'bold')
if item.date:
item.title = support.re.sub(r'[Pp]untata (?:del )?\d+/\d+/\d+', '', item.title)
item.title += support.typo(item.date, '_ [] bold')
if item.desc: item.plot = item.desc
item.forcethumb = True
item.fanart = item.thumbnail
return item
if 'la7teche' in item.url:
patron = r'<a href="(?P<url>[^"]+)">\s*<div class="holder-bg">.*?data-background-image="(?P<thumb>[^"]+)(?:[^>]+>){4}\s*(?P<title>[^<]+)(?:(?:[^>]+>){2}\s*(?P<plot>[^<]+))?'
else:
data = str(support.match(item.url, patron=r'"home-block home-block--oggi(.*?)</section>').matches)
data += httptools.downloadpage(item.url + '/video').data
patron = r'item[^>]+>\s*<a href="(?P<url>[^"]+)">.*?image="(?P<thumb>[^"]+)(?:[^>]+>){4,5}\s*(?P<title>[\d\w][^<]+)(?:(?:[^>]+>){7}\s*(?P<title2>[\d\w][^<]+))?'
patronNext = r'<a href="([^"]+)">'
return locals()

View File

@@ -9,37 +9,33 @@ host = 'https://metalvideo.com'
headers = {'X-Requested-With': 'XMLHttpRequest'}
@support.scrape
@support.menu
def mainlist(item):
menu = [('Generi',['', 'genres']),
('Ultimi Video',['/videos/latest', 'peliculas']),
('Top Video',['/videos/top', 'peliculas']),
('Cerca...',['','search',])]
return locals()
@support.scrape
def genres(item):
item.url = host
action = 'peliculas'
patronBlock = r'<ul class="dropdown-menu(?P<block>.*?)</ul>\s*</div'
patron = r'<a href="(?P<url>[^"]+)"(?: class="")?>(?P<title>[^<]+)<'
patronBlock = r'<div class="swiper-slide">(?P<block>.*?)<button'
patron = r'class="" href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)<'
def itemHook(item):
item.thumbnail = support.thumb('music')
item.contentType = 'music'
return item
def itemlistHook(itemlist):
itemlist.pop(0)
itemlist.append(
support.Item(
channel=item.channel,
title=support.typo('Cerca...', 'bold'),
contentType='music',
url=item.url,
action='search',
thumbnail=support.thumb('search')))
support.channel_config(item, itemlist)
return itemlist
return locals()
@support.scrape
def peliculas(item):
# debug=True
action = 'findvideos'
patron= r'<img src="[^"]+" alt="(?P<title>[^"]+)" data-echo="(?P<thumb>[^"]+)"(?:[^>]+>){7}<a href="(?P<url>[^"]+)"'
patronNext = r'<a href="([^"]+)">(?:&raquo|»)'
patron= r'<a href="(?P<url>[^"]+)"[^>]+>\s*<img src="(?P<thumb>[^"]+)" alt="(?P<title>[^"]+)"[^>]*>'
patronNext = r'<a href="([^"]+)" data-load="[^"]+" class="[^"]+" title="Next'
typeContentDict = {'': 'music'}
def itemHook(item):
item.contentType = 'music'
@@ -49,12 +45,13 @@ def peliculas(item):
def findvideos(item):
return support.server(item, Videolibrary=False)
data = support.match(item, patron=r'<source src="[^"]+').match
return support.server(item, Videolibrary=False, data=data)
def search(item, text):
support.info(text)
item.url = host + '/search.php?keywords=' + text + '&video-id='
item.url = host + '/search?keyword=' + text
try:
return peliculas(item)
# Continua la ricerca in caso di errore

View File

@@ -22,16 +22,17 @@ def mainlist(item):
top = [('Dirette {bold}', ['/dirette', 'live', '/palinsesto/onAir.json']),
('Replay {bold}', ['/guidatv', 'replayMenu', '/guidatv.json'])]
menu = [('Film {bullet bold}', ['/film', 'menu', '/tipologia/film/index.json']),
('Serie italiane {bullet bold}', ['/serietv', 'menu', '/tipologia/serieitaliane/index.json']),
# ('Fiction {bullet bold}', ['/fiction', 'menu', '/tipologia/fiction/index.json']),
('Documentari {bullet bold}', ['/documentari', 'menu', '/tipologia/documentari/index.json']),
('Programmi TV{bullet bold}', ['/programmi', 'menu', '/tipologia/programmi/index.json']),
('Programmi per Bambini {bullet bold}', ['/bambini', 'menu', '/tipologia/bambini/index.json']),
('Teen {bullet bold}', ['/teen', 'menu', '/tipologia/teen/index.json']),
('Learning {bullet bold}', ['/learning', 'menu', '/tipologia/learning/index.json']),
('Teche Rai {bullet bold storia}', ['/techerai', 'menu', '/tipologia/techerai/index.json']),
('Musica e Teatro {bullet bold}', ['/musica-e-teatro', 'menu', '/tipologia/musica-e-teatro/index.json'])
menu = [('Film {bold}', ['/film', 'menu', '/tipologia/film/index.json']),
('Serie italiane {bold}', ['/serieitaliane', 'menu', '/tipologia/serieitaliane/index.json']),
('Serie Internazionali {bold}', ['/serieinternazionali', 'menu', '/tipologia/serieinternazionali/index.json']),
('Programmi TV{bold}', ['/programmi', 'menu', '/tipologia/programmi/index.json']),
('Documentari {bold}', ['/documentari', 'menu', '/tipologia/documentari/index.json']),
('Bambini {bold}', ['/bambini', 'menu', '/tipologia/bambini/index.json']),
('Teen {bold}', ['/teen', 'menu', '/tipologia/teen/index.json']),
('Musica e Teatro {bold}', ['/musica-e-teatro', 'menu', '/tipologia/musica-e-teatro/index.json']),
('Teche Rai {bold storia}', ['/techerai', 'menu', '/tipologia/techerai/index.json']),
('Learning {bold}', ['/learning', 'menu', '/tipologia/learning/index.json']),
('Rai Italy{bold tv}', ['/raiitaly', 'menu', '/tipologia/raiitaly/index.json'])
]
search = ''
@@ -41,6 +42,7 @@ def mainlist(item):
def menu(item):
logger.debug()
itemlist = []
item.disable_videolibrary = True
action = 'peliculas'
@@ -60,7 +62,6 @@ def menu(item):
action = 'menu'
thumb = support.thumb('genres')
itemlist.append(item.clone(title=support.typo(it['name'], 'bold'), data=it.get('contents', item.data), thumbnail=thumb, action=action))
return itemlist
@@ -256,7 +257,7 @@ def getUrl(url):
elif url.startswith("/"): url = host + url
url = url.replace(".html?json", ".json").replace("/?json",".json").replace("?json",".json").replace(" ", "%20")
logger.debug('URL', url)
return url

View File

@@ -223,7 +223,7 @@ def episodios(item):
action='findvideos',
contentType='episode',
contentSerieName=item.fulltitle,
url='{}/watch/{}?e={}'.format(host, se['title_id'], ep['id'])))
url='{}/iframe/{}?episode_id={}'.format(host, se['title_id'], ep['id'])))
if config.get_setting('episode_info') and not support.stackCheck(['add_tvshow', 'get_newest']):
support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -235,10 +235,8 @@ def episodios(item):
def findvideos(item):
support.callAds('https://thaudray.com/5/3523301', host)
# Fix for old items in videolibrary
if item.episodeid and item.episodeid not in item.url:
item.url += item.episodeid
itemlist = [item.clone(title=channeltools.get_channel_parameters(item.channel)['title'], url=item.url, server='streamingcommunityws')]
itemlist = [item.clone(title=channeltools.get_channel_parameters(item.channel)['title'],
url=item.url.replace('/watch/', '/iframe/'), server='streamingcommunityws')]
return support.server(item, itemlist=itemlist, referer=False)

View File

@@ -3,9 +3,8 @@
# Canale per Tantifilm
# ------------------------------------------------------------
from core import scrapertools, httptools, support
from core import support
from core.item import Item
from core.support import info
from platformcode import logger
from platformcode import config

View File

@@ -13,12 +13,10 @@ headers = [['Referer', host]]
@support.menu
def mainlist(item):
# top = [('Novità',['', 'peliculas', 'new', 'tvshow']),
# ('Aggiornamenti', ['', 'peliculas', 'last', 'tvshow'])]
# tvshow = ['/category/serie-tv/']
anime =['/category/anime/']
# ('Sub-Ita',['/category/anime-sub-ita/', 'peliculas', 'sub']),
# ('Film Animati',['/category/film-animazione/','peliculas', '', 'movie'])]
anime =['/category/anime',
('ITA',['/lista-anime-ita','peliculas',]),
('Sub-ITA',['/lista-anime-sub-ita', 'peliculas'])]
# ('Film Animati',['/lista-anime-ita','peliculas', '', 'movie'])]
search = ''
return locals()
@@ -39,52 +37,28 @@ def search(item, text):
return []
def newest(categoria):
support.info(categoria)
item = support.Item()
try:
item.contentType = 'undefined'
item.url= host
item.args= 'new'
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
@support.scrape
def peliculas(item):
# debugBlock = True
# debug = True
# search = item.text
if item.contentType != 'movie': anime = True
anime = True
action = 'check'
blacklist = ['-Film Animazione disponibili in attesa di recensione ']
if item.action == 'search':
pagination = ''
#patronBlock = '"lcp_catlist"[^>]+>(?P<block>.*)</ul>'
patronBlock = '<main[^>]+>(?P<block>.*?)</ma'
#patron = r'href="(?P<url>[^"]+)" title="(?P<title>[^"]+)"'
patron = r'<a href="(?P<url>[^"]+)"[^>]*>(?P<title>[^<]+)<[^>]+>[^>]+>\s*<div'
elif item.args == 'last':
patronBlock = '(?:Aggiornamenti|Update)</h2>(?P<block>.*?)</ul>'
patron = r'<a href="(?P<url>[^"]+)">\s*<img[^>]+src[set]{0,3}="(?P<thumbnail>[^ ]+)[^>]+>\s*<span[^>]+>(?P<title>[^<]+)'
deflang = 'ITA' if 'sub' not in item.url else 'Sub-ITA'
if 'lista' in item.url:
pagination = 20
patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)'
else:
patronBlock = '<main[^>]+>(?P<block>.*)</main>'
# patron = r'<a href="(?P<url>[^"]+)" rel="bookmark">(?P<title>[^<]+)</a>[^>]+>[^>]+>[^>]+><img.*?src="(?P<thumb>[^"]+)".*?<p>(?P<plot>[^<]+)</p>.*?<span class="cat-links">Pubblicato in.*?.*?(?P<type>(?:[Ff]ilm|</artic))[^>]+>'
patron = r'<a href="(?P<url>[^"]+)" rel="bookmark">(?P<title>[^<]+)</a>(:?[^>]+>){3}(?:<img.*?src="(?P<thumb>[^"]+)")?.*?<p>(?P<plot>[^<]+)</p>.*?tag">.*?(?P<type>(?:[Ff]ilm|</art|Serie Tv))'
patron = r'(?i)<a href="(?P<url>[^"]+)" rel="bookmark">(?P<title>[^<]+)</a>(:?[^>]+>){3}(?:<img.*?src="(?P<thumb>[^"]+)")?.*?<p>(?P<plot>[^<]+)</p>.*?tag">.*?(?P<type>(?:film|serie|anime))(?P<cat>.*?)</span>'
typeContentDict={'movie':['film']}
typeActionDict={'findvideos':['film']}
patronNext = '<a class="next page-numbers" href="([^"]+)">'
def itemHook(item):
support.info(item.title)
if item.args == 'sub':
item.title += support.typo('Sub-ITA', 'bold color kod _ []')
if 'sub/ita' in item.cat.lower():
item.title = item.title.replace('[ITA]', '[Sub-ITA]')
item.contentLanguage = 'Sub-ITA'
return item
return locals()
@@ -101,18 +75,11 @@ def check(item):
def episodios(item):
anime = True
patron = r'>\s*(?:(?P<season>\d+)(?:&#215;|x|×))?(?P<episode>\d+)(?:\s+&#8211;\s+)?[ ]+(?P<title2>[^<]+)[ ]+<a (?P<data>.*?)(?:<br|</p)'
# if inspect.stack(0)[1][3] not in ['find_episodes']:
# from platformcode import autorenumber
# autorenumber.start(itemlist, item)
return locals()
def findvideos(item):
servers = support.server(item, data=item.data)
return servers
# return support.server(item, item.data if item.contentType != 'movie' else support.match(item.url, headers=headers).data )
return support.server(item, data=item.data)
def clean_title(title):

View File

@@ -3,93 +3,137 @@
# Canale per tunein
# ------------------------------------------------------------
from core import scrapertools, support
from core import httptools, support
from platformcode import logger
host = 'http://api.radiotime.com'
headers = [['Referer', host]]
args = 'formats=mp3,aac,ogg,flash,html,hls,wma&partnerId=RadioTime&itemToken='
@support.scrape
@support.menu
def mainlist(item):
item.url = host
action = 'radio'
patron = r'text="(?P<title>[^"]+)" URL="(?P<url>[^"]+)"'
def itemHook(item):
item.thumbnail = support.thumb('music')
item.contentType = 'music'
return item
def itemlistHook(itemlist):
itemlist.append(
item.clone(title=support.typo('Cerca...', 'bold color kod'), action='search', thumbnail=support.thumb('search')))
support.channel_config(item, itemlist)
return itemlist
menu = [('Musica {bullet music}' ,['/categories/music?{}'.format(args), 'radio', '', 'music']),
('Sport {bullet music}' ,['/categories/sports?{}'.format(args), 'radio', '', 'music']),
('Notizie e Dibattiti {bullet music}' ,['/categories/c57922?{}'.format(args), 'radio', '', 'music']),
('Podcast {bullet music}' ,['/categories/c100000088?{}'.format(args), 'radio', '', 'music']),
('Audiolibri {bullet music}' ,['/categories/c100006408?{}'.format(args), 'radio', '', 'music']),
('Luogo {bullet music}' ,['/categories/regions?{}'.format(args), 'radio', '', 'music']),
('Lingua {bullet music}' ,['/categories/languages?{}'.format(args), 'radio', '', 'music'])]
search =''
return locals()
def radio(item):
support.info()
itemlist = []
data = support.match(item, patron= r'text="(?P<title>[^\("]+)(?:\((?P<location>[^\)]+)\))?" URL="(?P<url>[^"]+)" bitrate="(?P<quality>[^"]+)" reliability="[^"]+" guide_id="[^"]+" subtext="(?P<song>[^"]+)" genre_id="[^"]+" formats="(?P<type>[^"]+)" (?:playing="[^"]+" )?(?:playing_image="[^"]+" )?(?:show_id="[^"]+" )?(?:item="[^"]+" )?image="(?P<thumb>[^"]+)"')
if data.matches:
for title, location, url, quality, song, type, thumbnail in data.matches:
title = scrapertools.decodeHtmlentities(title)
itemlist.append(
item.clone(title = support.typo(title, 'bold') + support.typo(quality + ' kbps','_ [] bold color kod'),
thumbnail = thumbnail,
url = url,
contentType = 'music',
plot = support.typo(location, 'bold') + '\n' + song,
action = 'findvideos'))
else:
matches = support.match(data.data, patron= r'text="(?P<title>[^\("]+)(?:\([^\)]+\))?" URL="(?P<url>[^"]+)" (?:guide_id="[^"]+" )?(?:stream_type="[^"]+" )?topic_duration="(?P<duration>[^"]+)" subtext="(?P<plot>[^"]+)" item="[^"]+" image="(?P<thumb>[^"]+)"').matches
if matches:
for title, url, duration, plot, thumbnail in matches:
title = scrapertools.unescape(title)
infoLabels={}
infoLabels['duration'] = duration
itemlist.append(
item.clone(title = support.typo(title, 'bold'),
thumbnail = thumbnail,
infolLbels = infoLabels,
url = url,
contentType = 'music',
plot = plot,
action = 'findvideos'))
else:
matches = support.match(data.data, patron= r'text="(?P<title>[^"]+)" URL="(?P<url>[^"]+)"').matches
for title, url in matches:
title = scrapertools.unescape(title)
itemlist.append(
item.clone(channel = item.channel,
title = support.typo(title, 'bold'),
thumbnail = item.thumbnail,
url = url,
action = 'radio'))
support.nextPage(itemlist, item, data.data, r'(?P<url>[^"]+)" key="nextStations')
return itemlist
def findvideos(item):
import xbmc
itemlist = []
item.action = 'play'
urls = support.match(item.url).data.strip().split()
for url in urls:
item.url= url
item.server = 'directo'
itemlist.append(item)
return itemlist
def search(item, text):
support.info(text)
item.url = host + '/Search.ashx?query=' +text
itemlist = list()
try:
return radio(item)
js = httptools.downloadpage('{}/profiles?fullTextSearch=true&query={}&{}'.format(host, text, args)).json
data = js.get('Items', {})
for c in data:
if c.get('Pivots',{}).get('More',{}).get('Url', ''):
data = httptools.downloadpage(c.get('Pivots',{}).get('More',{}).get('Url', '')).json.get('Items',{})
else:
data = c.get('Children')
if data:
itemlist.extend(buildItemList(item, data))
if js.get('Paging', {}).get('Next'):
support.nextPage(itemlist, item, next_page=js.get('Paging', {}).get('Next'))
return itemlist
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
logger.error(line)
return []
def radio(item):
itemlist = list()
js = dict()
if item.data:
data = item.data
else:
js = httptools.downloadpage(item.url).json
data = js.get('Items', {})
itemlist = buildItemList(item, data)
if js.get('Paging', {}).get('Next'):
support.nextPage(itemlist, item, next_page=js.get('Paging', {}).get('Next'))
return itemlist
def buildItemList(item, data):
itemlist = list()
# support.dbg()
for c in data:
item.data = ''
item.action = 'radio'
token = c.get('Context',{}).get('Token','')
if not token:
token = c.get('Actions', {}).get('Context',{}).get('Token','')
if not c.get('Title', c.get('AccessibilityTitle')) or 'premium' in c.get('Title', c.get('AccessibilityTitle')).lower():
continue
if c.get('Children'):
if len(data) > 1:
if c.get('Pivots',{}).get('More',{}).get('Url', ''):
itm = item.clone(title=c.get('Title', c.get('AccessibilityTitle')),
url=c.get('Pivots',{}).get('More',{}).get('Url', ''),
token=token)
else:
itm = item.clone(title=c.get('Title', c.get('AccessibilityTitle')),
data=c.get('Children'),
token=token)
else:
if c.get('Pivots',{}).get('More',{}).get('Url', ''):
data = httptools.downloadpage(c.get('Pivots',{}).get('More',{}).get('Url', '')).json.get('Items', {})
else:
data = c.get('Children')
return buildItemList(item, data)
elif c.get('GuideId'):
title = c.get('Title', c.get('AccessibilityTitle'))
plot = '[B]{}[/B]\n{}'.format(c.get('Subtitle', ''), c.get('Description', ''))
thumbnail = c.get('Image', '')
if c.get('GuideId').startswith('s'):
itm = item.clone(title=title,
plot=plot,
thumbnail=thumbnail,
url = 'http://opml.radiotime.com/Tune.ashx?render=json&id={}&{}{}'.format(c.get('GuideId'), args, token),
action = 'findvideos')
else:
itm = item.clone(title=title,
plot=plot,
thumbnail=thumbnail,
url = c.get('Actions', {}).get('Browse',{}).get('Url',''))
elif c.get('Actions', {}).get('Browse',{}).get('Url',''):
title = c.get('Title', c.get('AccessibilityTitle'))
itm = item.clone(title = title,
url = c.get('Actions', {}).get('Browse',{}).get('Url',''))
itemlist.append(itm)
return itemlist
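The lookups above lean on one pattern worth isolating: chained dict.get() calls with {} defaults, so any missing level degrades to an empty value instead of raising (illustrative data only):

entry = {'Actions': {'Browse': {'Url': 'https://example.host/browse'}}}
url = entry.get('Actions', {}).get('Browse', {}).get('Url', '')
title = entry.get('Title', entry.get('AccessibilityTitle'))   # None when both keys are missing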
def findvideos(item):
item.action = 'play'
js = httptools.downloadpage(item.url, cloudscraper=True).json.get('body', {})
video_urls = list()
for it in js:
video_urls.append(['m3u8 [{}]'.format(it.get('bitrate')), it.get('url')])
item.referer = False
item.server = 'directo'
item.video_urls = video_urls
return [item]

View File

@@ -287,10 +287,11 @@ def downloadpage(url, **opt):
# Headers passed as parameters
if opt.get('headers', None) is not None:
opt['headers'] = dict(opt['headers'])
if not opt.get('replace_headers', False):
req_headers.update(dict(opt['headers']))
req_headers.update(opt['headers'])
else:
req_headers = dict(opt['headers'])
req_headers = opt['headers']
if domain in directIP.keys() and not opt.get('disable_directIP', False):
req_headers['Host'] = domain
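A hedged sketch of the header handling this hunk moves to (names are illustrative, not the real downloadpage signature): headers may arrive as a list of [key, value] pairs or as a dict, so they are normalised to a dict once and then either merged into the defaults or substituted for them.

def merge_request_headers(default_headers, opt_headers=None, replace=False):
    if opt_headers is None:
        return dict(default_headers)
    opt_headers = dict(opt_headers)      # accepts [['K', 'V'], ...] or {'K': 'V'}
    if replace:
        return opt_headers               # replace_headers=True drops the defaults
    merged = dict(default_headers)
    merged.update(opt_headers)
    return merged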
@@ -429,8 +430,9 @@ def downloadpage(url, **opt):
else:
logger.debug("CF retry with proxy for domain: %s" % domain)
if not opt.get('headers'):
opt['headers'] = []
opt['headers'].extend([['Px-Host', domain], ['Px-Token', cf_proxy['token']]])
opt['headers'] = {}
opt['headers']['Px-Host'] = domain
opt['headers']['Px-Token'] = cf_proxy['token']
opt['real-url'] = url
ret = downloadpage(urlparse.urlunparse((parse.scheme, cf_proxy['url'], parse.path, parse.params, parse.query, parse.fragment)), **opt)
ret.url = url

View File

@@ -621,7 +621,7 @@ def scrape(func):
else: autorenumber.start(itemlist)
if itemlist and action != 'play' and 'patronMenu' not in args and 'patronGenreMenu' not in args \
and not stackCheck(['add_tvshow', 'get_newest']) and (function not in ['episodios', 'mainlist']
and not stackCheck(['add_tvshow', 'get_newest']) and not disabletmdb and (function not in ['episodios', 'mainlist']
or (function in ['episodios', 'seasons'] and config.get_setting('episode_info') and itemlist[0].season)):
# dbg()
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -120,7 +120,7 @@ def update_title(item):
if item.from_title_tmdb: del item.from_title_tmdb
if not item.from_update and item.from_title: del item.from_title
if item.contentSerieName: # We copy the title to serve as a reference in the "Complete Information" menu
if item.contentSerieName and item.contentType == 'tvshow': # We copy the title to serve as a reference in the "Complete Information" menu
item.infoLabels['originaltitle'] = item.contentSerieName
item.contentTitle = item.contentSerieName
else:

View File

@@ -126,7 +126,7 @@ def HJs(val):
except Exception as e:
message = 'your Python function failed! '
try:
message += e.message
message += str(e)
except:
pass
raise MakeError('Error', message)
@@ -319,7 +319,7 @@ class PyJs(object):
#prop = prop.value
if self.Class == 'Undefined' or self.Class == 'Null':
raise MakeError('TypeError',
'Undefined and null dont have properties!')
'Undefined and null dont have properties (tried getting property %s)' % repr(prop))
if not isinstance(prop, basestring):
prop = prop.to_string().value
if not isinstance(prop, basestring): raise RuntimeError('Bug')
@@ -361,7 +361,7 @@ class PyJs(object):
* / % + - << >> & ^ |'''
if self.Class == 'Undefined' or self.Class == 'Null':
raise MakeError('TypeError',
'Undefined and null dont have properties!')
'Undefined and null don\'t have properties (tried setting property %s)' % repr(prop))
if not isinstance(prop, basestring):
prop = prop.to_string().value
if NUMPY_AVAILABLE and prop.isdigit():
@@ -991,7 +991,8 @@ class PyJs(object):
cand = self.get(prop)
if not cand.is_callable():
raise MakeError('TypeError',
'%s is not a function' % cand.typeof())
'%s is not a function (tried calling property %s of %s)' % (
cand.typeof(), repr(prop), repr(self.Class)))
return cand.call(self, args)
def to_python(self):
@@ -1304,7 +1305,7 @@ class PyObjectWrapper(PyJs):
except Exception as e:
message = 'your Python function failed! '
try:
message += e.message
message += str(e)
except:
pass
raise MakeError('Error', message)
@@ -1464,9 +1465,11 @@ class PyJsFunction(PyJs):
except NotImplementedError:
raise
except RuntimeError as e: # maximum recursion
raise MakeError(
'RangeError', e.message if
not isinstance(e, NotImplementedError) else 'Not implemented!')
try:
msg = e.message
except:
msg = repr(e)
raise MakeError('RangeError', msg)
def has_instance(self, other):
# I am not sure here so instanceof may not work lol.

View File

@@ -32,8 +32,7 @@ def UTC(year, month, date, hours, minutes, seconds, ms): # todo complete this
mili = args[6].to_number() if l > 6 else Js(0)
if not y.is_nan() and 0 <= y.value <= 99:
y = y + Js(1900)
t = TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
return PyJsDate(t, prototype=DatePrototype)
return TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
@Js
@@ -76,11 +75,12 @@ class PyJsDate(PyJs):
# todo fix this problematic datetime part
def to_local_dt(self):
return datetime.datetime.utcfromtimestamp(
UTCToLocal(self.value) // 1000)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=UTCToLocal(self.value) // 1000)
def to_utc_dt(self):
return datetime.datetime.utcfromtimestamp(self.value // 1000)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=self.value // 1000)
def local_strftime(self, pattern):
if self.value is NaN:
@@ -118,21 +118,40 @@ class PyJsDate(PyJs):
def parse_date(py_string): # todo support all date string formats
try:
date_formats = (
"%Y-%m-%d",
"%m/%d/%Y",
"%b %d %Y",
)
# Supports these hour formats, with or without an hour component.
hour_formats = (
"T%H:%M:%S.%f",
"T%H:%M:%S",
) + ('',)
# Supports with or without Z indicator.
z_formats = ("Z",) + ('',)
supported_formats = [
d + t + z
for d in date_formats
for t in hour_formats
for z in z_formats
]
for date_format in supported_formats:
try:
dt = datetime.datetime.strptime(py_string, "%Y-%m-%dT%H:%M:%S.%fZ")
except:
dt = datetime.datetime.strptime(py_string, "%Y-%m-%dT%H:%M:%SZ")
return MakeDate(
MakeDay(Js(dt.year), Js(dt.month - 1), Js(dt.day)),
MakeTime(
Js(dt.hour), Js(dt.minute), Js(dt.second),
Js(dt.microsecond // 1000)))
except:
raise MakeError(
'TypeError',
'Could not parse date %s - unsupported date format. Currently only supported format is RFC3339 utc. Sorry!'
% py_string)
dt = datetime.datetime.strptime(py_string, date_format)
except ValueError:
continue
else:
return MakeDate(
MakeDay(Js(dt.year), Js(dt.month - 1), Js(dt.day)),
MakeTime(
Js(dt.hour), Js(dt.minute), Js(dt.second),
Js(dt.microsecond // 1000)))
raise MakeError(
'TypeError',
'Could not parse date %s - unsupported date format. Currently only supported formats are RFC3339 utc, ISO Date, Short Date, and Long Date. Sorry!'
% py_string)
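The rewritten parse_date builds its accepted formats as a cross product; a quick standalone check of that matrix (same strptime patterns as above; month-name parsing assumes an English locale):

import datetime

date_formats = ("%Y-%m-%d", "%m/%d/%Y", "%b %d %Y")
hour_formats = ("T%H:%M:%S.%f", "T%H:%M:%S", "")
z_formats = ("Z", "")
supported = [d + t + z for d in date_formats for t in hour_formats for z in z_formats]

def try_parse(s):
    for fmt in supported:
        try:
            return datetime.datetime.strptime(s, fmt)
        except ValueError:
            continue
    raise ValueError('unsupported date format: %r' % s)

print(try_parse('2023-06-30T19:39:03Z'))   # RFC3339 UTC
print(try_parse('Jun 30 2023'))            # long date, no time part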
def date_constructor(*args):
@@ -332,7 +351,7 @@ class DateProto:
check_date(this)
t = UTCToLocal(this.value)
tim = MakeTime(
HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
Js(HourFromTime(t)), Js(MinFromTime(t)), Js(SecFromTime(t)), ms)
u = TimeClip(LocalToUTC(MakeDate(Day(t), tim)))
this.value = u
return u
@@ -341,12 +360,164 @@ class DateProto:
check_date(this)
t = this.value
tim = MakeTime(
HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
Js(HourFromTime(t)), Js(MinFromTime(t)), Js(SecFromTime(t)), ms)
u = TimeClip(MakeDate(Day(t), tim))
this.value = u
return u
# todo Complete all setters!
def setSeconds(sec, ms=None):
check_date(this)
t = UTCToLocal(this.value)
s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(
Day(t), MakeTime(Js(HourFromTime(t)), Js(MinFromTime(t)), s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCSeconds(sec, ms=None):
check_date(this)
t = this.value
s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(
Day(t), MakeTime(Js(HourFromTime(t)), Js(MinFromTime(t)), s, milli))
v = TimeClip(date)
this.value = v
return v
def setMinutes(min, sec=None, ms=None):
check_date(this)
t = UTCToLocal(this.value)
m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(Js(HourFromTime(t)), m, s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCMinutes(min, sec=None, ms=None):
check_date(this)
t = this.value
m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(Js(HourFromTime(t)), m, s, milli))
v = TimeClip(date)
this.value = v
return v
def setHours(hour, min=None, sec=None, ms=None):
check_date(this)
t = UTCToLocal(this.value)
h = hour.to_number()
if not min is None: m = Js(MinFromTime(t))
else: m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(h, m, s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCHours(hour, min=None, sec=None, ms=None):
check_date(this)
t = this.value
h = hour.to_number()
if not min is None: m = Js(MinFromTime(t))
else: m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(h, m, s, milli))
v = TimeClip(date)
this.value = v
return v
def setDate(date):
check_date(this)
t = UTCToLocal(this.value)
dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), Js(MonthFromTime(t)), dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCDate(date):
check_date(this)
t = this.value
dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), Js(MonthFromTime(t)), dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def setMonth(month, date=None):
check_date(this)
t = UTCToLocal(this.value)
m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), m, dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCMonth(month, date=None):
check_date(this)
t = this.value
m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), m, dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def setFullYear(year, month=None, date=None):
check_date(this)
if not this.value is NaN: t = UTCToLocal(this.value)
else: t = 0
y = year.to_number()
if not month is None: m = Js(MonthFromTime(t))
else: m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(y, m, dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCFullYear(year, month=None, date=None):
check_date(this)
if not this.value is NaN: t = UTCToLocal(this.value)
else: t = 0
y = year.to_number()
if not month is None: m = Js(MonthFromTime(t))
else: m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(y, m, dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def toUTCString():
check_date(this)

View File

@@ -36,8 +36,8 @@ def DaylightSavingTA(t):
return t
try:
return int(
LOCAL_ZONE.dst(datetime.datetime.utcfromtimestamp(
t // 1000)).seconds) * 1000
LOCAL_ZONE.dst(datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=t // 1000)).seconds) * 1000
except:
warnings.warn(
'Invalid datetime date, assumed DST time, may be inaccurate...',
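Why the epoch-plus-timedelta form is used here and in the PyJsDate helpers above: datetime.utcfromtimestamp() can raise OSError for negative or out-of-range timestamps on some platforms (notably Windows), while adding a timedelta to the epoch works for any representable date. A one-line illustration:

import datetime
print(datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=-86400))  # 1969-12-31 00:00:00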

View File

@@ -53,7 +53,7 @@ def write_file_contents(path_or_file, contents):
if hasattr(path_or_file, 'write'):
path_or_file.write(contents)
else:
with open(path_as_local(path_or_file), 'w') as f:
with codecs.open(path_as_local(path_or_file), "w", "utf-8") as f:
f.write(contents)
@@ -238,6 +238,10 @@ class EvalJs(object):
self.execute_debug(code)
return self['PyJsEvalResult']
@property
def context(self):
return self._context
def __getattr__(self, var):
return getattr(self._var, var)
@@ -268,14 +272,3 @@ class EvalJs(object):
else:
sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
time.sleep(0.01)
#print x
if __name__ == '__main__':
#with open('C:\Users\Piotrek\Desktop\esprima.js', 'rb') as f:
# x = f.read()
e = EvalJs()
e.execute('square(x)')
#e.execute(x)
e.console()

View File

@@ -6,7 +6,7 @@ def console():
@Js
def log():
print(arguments[0])
print(" ".join(repr(element) for element in arguments.to_list()))
console.put('log', log)
console.put('debug', log)

View File

@@ -0,0 +1 @@
from .seval import eval_js_vm

View File

@@ -602,11 +602,12 @@ class PyJsDate(PyJs):
# todo fix this problematic datetime part
def to_local_dt(self):
return datetime.datetime.utcfromtimestamp(
self.UTCToLocal(self.value) // 1000)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=self.UTCToLocal(self.value) // 1000)
def to_utc_dt(self):
return datetime.datetime.utcfromtimestamp(self.value // 1000)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=self.value // 1000)
def local_strftime(self, pattern):
if self.value is NaN:

View File

@@ -32,8 +32,7 @@ def UTC(year, month, date, hours, minutes, seconds, ms): # todo complete this
mili = args[6].to_number() if l > 6 else Js(0)
if not y.is_nan() and 0 <= y.value <= 99:
y = y + Js(1900)
t = TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
return PyJsDate(t, prototype=DatePrototype)
return TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
@Js
@@ -76,11 +75,12 @@ class PyJsDate(PyJs):
# todo fix this problematic datetime part
def to_local_dt(self):
return datetime.datetime.utcfromtimestamp(
UTCToLocal(self.value) // 1000)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=UTCToLocal(self.value) // 1000)
def to_utc_dt(self):
return datetime.datetime.utcfromtimestamp(self.value // 1000)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=self.value // 1000)
def local_strftime(self, pattern):
if self.value is NaN:
@@ -332,7 +332,7 @@ class DateProto:
check_date(this)
t = UTCToLocal(this.value)
tim = MakeTime(
HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
Js(HourFromTime(t)), Js(MinFromTime(t)), Js(SecFromTime(t)), ms)
u = TimeClip(LocalToUTC(MakeDate(Day(t), tim)))
this.value = u
return u
@@ -341,12 +341,164 @@ class DateProto:
check_date(this)
t = this.value
tim = MakeTime(
HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
Js(HourFromTime(t)), Js(MinFromTime(t)), Js(SecFromTime(t)), ms)
u = TimeClip(MakeDate(Day(t), tim))
this.value = u
return u
# todo Complete all setters!
def setSeconds(sec, ms=None):
check_date(this)
t = UTCToLocal(this.value)
s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(
Day(t), MakeTime(Js(HourFromTime(t)), Js(MinFromTime(t)), s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCSeconds(sec, ms=None):
check_date(this)
t = this.value
s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(
Day(t), MakeTime(Js(HourFromTime(t)), Js(MinFromTime(t)), s, milli))
v = TimeClip(date)
this.value = v
return v
def setMinutes(min, sec=None, ms=None):
check_date(this)
t = UTCToLocal(this.value)
m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(Js(HourFromTime(t)), m, s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCMinutes(min, sec=None, ms=None):
check_date(this)
t = this.value
m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(Js(HourFromTime(t)), m, s, milli))
v = TimeClip(date)
this.value = v
return v
def setHours(hour, min=None, sec=None, ms=None):
check_date(this)
t = UTCToLocal(this.value)
h = hour.to_number()
if not min is None: m = Js(MinFromTime(t))
else: m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(h, m, s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCHours(hour, min=None, sec=None, ms=None):
check_date(this)
t = this.value
h = hour.to_number()
if not min is None: m = Js(MinFromTime(t))
else: m = min.to_number()
if not sec is None: s = Js(SecFromTime(t))
else: s = sec.to_number()
if not ms is None: milli = Js(msFromTime(t))
else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(h, m, s, milli))
v = TimeClip(date)
this.value = v
return v
def setDate(date):
check_date(this)
t = UTCToLocal(this.value)
dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), Js(MonthFromTime(t)), dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCDate(date):
check_date(this)
t = this.value
dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), Js(MonthFromTime(t)), dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def setMonth(month, date=None):
check_date(this)
t = UTCToLocal(this.value)
m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), m, dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCMonth(month, date=None):
check_date(this)
t = this.value
m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), m, dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def setFullYear(year, month=None, date=None):
check_date(this)
if not this.value is NaN: t = UTCToLocal(this.value)
else: t = 0
y = year.to_number()
if not month is None: m = Js(MonthFromTime(t))
else: m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(y, m, dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCFullYear(year, month=None, date=None):
check_date(this)
if not this.value is NaN: t = UTCToLocal(this.value)
else: t = 0
y = year.to_number()
if not month is None: m = Js(MonthFromTime(t))
else: m = month.to_number()
if not date is None: dt = Js(DateFromTime(t))
else: dt = date.to_number()
newDate = MakeDate(
MakeDay(y, m, dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def toUTCString():
check_date(this)

View File

@@ -16,7 +16,8 @@ CONSTANTS = {
'SQRT1_2': 0.7071067811865476,
'SQRT2': 1.4142135623730951
}
def is_infinity(x):
return x - 1e10 == x
class MathFunctions:
def abs(this, args):
@@ -65,22 +66,22 @@ class MathFunctions:
def ceil(this, args):
x = get_arg(args, 0)
a = to_number(x)
if a != a: # it must be a nan
return NaN
if not is_finite(x):
return x
return float(math.ceil(a))
def floor(this, args):
x = get_arg(args, 0)
a = to_number(x)
if a != a: # it must be a nan
return NaN
if not is_finite(x):
return x
return float(math.floor(a))
def round(this, args):
x = get_arg(args, 0)
a = to_number(x)
if a != a: # it must be a nan
return NaN
if not is_finite(x):
return x
return float(round(a))
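The guard added to ceil/floor/round mirrors JavaScript semantics: NaN stays NaN and infinities pass through untouched. A plain-Python analogue of the same behaviour (not the library's Js types):

import math

def js_round(a):
    if a != a:            # NaN check
        return float('nan')
    if math.isinf(a):
        return a          # +Infinity / -Infinity are returned unchanged
    return float(round(a))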
def sin(this, args):

View File

@@ -1,6 +1,6 @@
from ..conversions import *
from ..func_utils import *
from six import unichr
def fromCharCode(this, args):
res = u''

View File

@@ -38,8 +38,8 @@ def DaylightSavingTA(t):
return t
try:
return int(
LOCAL_ZONE.dst(datetime.datetime.utcfromtimestamp(
t // 1000)).seconds) * 1000
LOCAL_ZONE.dst(datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=t // 1000)).seconds) * 1000
except:
warnings.warn(
'Invalid datetime date, assumed DST time, may be inaccurate...',

View File

@@ -798,7 +798,7 @@ OP_CODES = {}
g = ''
for g in globals():
try:
if not issubclass(globals()[g], OP_CODE) or g is 'OP_CODE':
if not issubclass(globals()[g], OP_CODE) or g == 'OP_CODE':
continue
except:
continue
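Why the comparison was changed: 'is' tests object identity, which is not guaranteed for equal string values, while '==' compares the text (CPython 3.8+ also emits a SyntaxWarning for 'is' against a literal). A quick illustration:

name = ''.join(['OP_', 'CODE'])
print(name == 'OP_CODE')   # True
print(name is 'OP_CODE')   # typically False, plus a SyntaxWarning on Python 3.8+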

View File

@@ -22,6 +22,11 @@ def replacement_template(rep, source, span, npar):
res += '$'
n += 2
continue
elif rep[n + 1] == '&':
# replace with matched string
res += source[span[0]:span[1]]
n += 2
continue
elif rep[n + 1] == '`':
# replace with string that is BEFORE match
res += source[:span[0]]
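The new '$&' branch makes the replacement template expand to the whole matched text, as in JavaScript's String.replace. For comparison, Python's re module spells the same thing \g<0>:

import re
print(re.sub(r'\d+', r'<\g<0>>', 'order 42'))   # -> 'order <42>'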

View File

@@ -1,7 +1,14 @@
from __future__ import print_function
from timeit import timeit
from collections import namedtuple
from array import array
from itertools import izip
try:
#python 2 code
from itertools import izip as zip
except ImportError:
pass
from collections import deque
@@ -47,7 +54,7 @@ t = []
Type = None
try:
print timeit(
print(timeit(
"""
t.append(4)
@@ -56,7 +63,7 @@ t.pop()
""",
"from __main__ import X,Y,namedtuple,array,t,add,Type, izip",
number=1000000)
"from __main__ import X,Y,namedtuple,array,t,add,Type, zip",
number=1000000))
except:
raise

View File

@@ -1,3 +1,4 @@
from __future__ import print_function
from string import ascii_lowercase, digits
##################################
StringName = u'PyJsConstantString%d_'
@@ -305,4 +306,4 @@ if __name__ == '__main__':
''')
t, d = remove_constants(test)
print t, d
print(t, d)

View File

@@ -16,6 +16,8 @@ If case of parsing errors it must return a pos of error.
NOTES:
Strings and other literals are not present so each = means assignment
"""
from __future__ import print_function
from utils import *
from jsparser import *
@@ -80,4 +82,4 @@ def bass_translator(s):
if __name__ == '__main__':
print bass_translator('3.ddsd = 40')
print(bass_translator('3.ddsd = 40'))

View File

@@ -9,6 +9,8 @@ FOR 123
FOR iter
CONTINUE, BREAK, RETURN, LABEL, THROW, TRY, SWITCH
"""
from __future__ import print_function
from utils import *
from jsparser import *
from nodevisitor import exp_translator
@@ -477,4 +479,4 @@ def translate_flow(source):
if __name__ == '__main__':
#print do_dowhile('do {} while(k+f)', 0)[0]
#print 'e: "%s"'%do_expression('++(c?g:h); mj', 0)[0]
print translate_flow('a; yimport test')[0]
print(translate_flow('a; yimport test')[0])

View File

@@ -1,4 +1,6 @@
"""This module removes JS functions from source code"""
from __future__ import print_function
from jsparser import *
from utils import *
@@ -94,5 +96,5 @@ def remove_functions(source, all_inline=False):
if __name__ == '__main__':
print remove_functions(
'5+5 function n (functiona ,functionaj) {dsd s, dsdd}')
print(remove_functions(
'5+5 function n (functiona ,functionaj) {dsd s, dsdd}'))

View File

@@ -45,6 +45,7 @@ TODO
"""
from __future__ import print_function
from utils import *
@@ -64,7 +65,7 @@ OP_METHODS = {
def dbg(source):
try:
with open('C:\Users\Piotrek\Desktop\dbg.py', 'w') as f:
with open(r'C:\Users\Piotrek\Desktop\dbg.py', 'w') as f:
f.write(source)
except:
pass
@@ -77,13 +78,13 @@ def indent(lines, ind=4):
def inject_before_lval(source, lval, code):
if source.count(lval) > 1:
dbg(source)
print
print lval
print()
print(lval)
raise RuntimeError('Too many lvals (%s)' % lval)
elif not source.count(lval):
dbg(source)
print
print lval
print()
print(lval)
assert lval not in source
raise RuntimeError('No lval found "%s"' % lval)
end = source.index(lval)

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
from jsparser import *
from utils import *
import re
@@ -557,6 +559,6 @@ if __name__ == '__main__':
#print 'Here', trans('(eee ) . ii [ PyJsMarker ] [ jkj ] ( j , j ) .
# jiji (h , ji , i)(non )( )()()()')
for e in xrange(3):
print exp_translator('jk = kk.ik++')
print(exp_translator('jk = kk.ik++'))
#First line translated with PyJs: PyJsStrictEq(PyJsAdd((Js(100)*Js(50)),Js(30)), Js("5030")), yay!
print exp_translator('delete a.f')
print(exp_translator('delete a.f'))

View File

@@ -1,6 +1,8 @@
""" This module removes all objects/arrays from JS source code and replace them with LVALS.
Also it has s function translating removed object/array to python code.
Use this module just after removing constants. Later move on to removing functions"""
from __future__ import print_function
OBJECT_LVAL = 'PyJsLvalObject%d_'
ARRAY_LVAL = 'PyJsLvalArray%d_'
from utils import *
@@ -180,7 +182,7 @@ def translate_object(obj, lval, obj_count=1, arr_count=1):
try:
key, value = spl
except: #len(spl)> 2
print 'Unusual case ' + repr(e)
print('Unusual case ' + repr(e))
key = spl[0]
value = ':'.join(spl[1:])
key = key.strip()
@@ -293,8 +295,8 @@ if __name__ == '__main__':
#print remove_objects(test)
#print list(bracket_split(' {}'))
print
print remove_arrays(
print()
print(remove_arrays(
'typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""], [][[5][5]])[1].toLowerCase()])'
)
print is_object('', ')')
))
print(is_object('', ')'))

View File

@@ -1,3 +1,5 @@
from __future__ import print_function
from flow import translate_flow
from constants import remove_constants, recover_constants
from objects import remove_objects, remove_arrays, translate_object, translate_array, set_func_translator
@@ -148,4 +150,4 @@ if __name__ == '__main__':
#res = translate_js(jq)
res = translate_js(t)
dbg(SANDBOX % indent(res))
print 'Done'
print('Done')

View File

@@ -1,10 +1,16 @@
__all__ = ['require']
import subprocess, os, codecs, glob
from .evaljs import translate_js, DEFAULT_HEADER
from .translators.friendly_nodes import is_valid_py_name
import six
import tempfile
import hashlib
import random
DID_INIT = False
DIRNAME = os.path.dirname(os.path.abspath(__file__))
PY_NODE_MODULES_PATH = os.path.join(DIRNAME, 'py_node_modules')
DIRNAME = tempfile.mkdtemp()
PY_NODE_MODULES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'py_node_modules')
def _init():
@@ -46,23 +52,33 @@ GET_FROM_GLOBALS_FUNC = '''
'''
def _get_module_py_name(module_name):
return module_name.replace('-', '_')
def _get_module_var_name(module_name):
return _get_module_py_name(module_name).rpartition('/')[-1]
cand = _get_module_py_name(module_name).rpartition('/')[-1]
if not is_valid_py_name(cand):
raise ValueError(
"Invalid Python module name %s (generated from %s). Unsupported/invalid npm module specification?" % (
repr(cand), repr(module_name)))
return cand
def _get_and_translate_npm_module(module_name, include_polyfill=False, update=False):
def _get_and_translate_npm_module(module_name, include_polyfill=False, update=False, maybe_version_str=""):
assert isinstance(module_name, str), 'module_name must be a string!'
py_name = _get_module_py_name(module_name)
module_filename = '%s.py' % py_name
var_name = _get_module_var_name(module_name)
if not os.path.exists(os.path.join(PY_NODE_MODULES_PATH,
module_filename)) or update:
_init()
in_file_name = 'tmp0in439341018923js2py.js'
out_file_name = 'tmp0out439341018923js2py.js'
module_hash = hashlib.sha1(module_name.encode("utf-8")).hexdigest()[:15]
version = random.randrange(10000000000000)
in_file_name = 'in_%s_%d.js' % (module_hash, version)
out_file_name = 'out_%s_%d.js' % (module_hash, version)
code = ADD_TO_GLOBALS_FUNC
if include_polyfill:
code += "\n;require('babel-polyfill');\n"
@@ -74,6 +90,8 @@ def _get_and_translate_npm_module(module_name, include_polyfill=False, update=Fa
f.write(code.encode('utf-8') if six.PY3 else code)
pkg_name = module_name.partition('/')[0]
if maybe_version_str:
pkg_name += '@' + maybe_version_str
# make sure the module is installed
assert subprocess.call(
'cd %s;npm install %s' % (repr(DIRNAME), pkg_name),
@@ -93,7 +111,7 @@ def _get_and_translate_npm_module(module_name, include_polyfill=False, update=Fa
with codecs.open(os.path.join(DIRNAME, out_file_name), "r",
"utf-8") as f:
js_code = f.read()
os.remove(os.path.join(DIRNAME, out_file_name))
print("Bundled JS library dumped at: %s" % os.path.join(DIRNAME, out_file_name))
if len(js_code) < 50:
raise RuntimeError("Candidate JS bundle too short - likely browserify issue.")
js_code += GET_FROM_GLOBALS_FUNC
@@ -117,21 +135,25 @@ def _get_and_translate_npm_module(module_name, include_polyfill=False, update=Fa
return py_code
def require(module_name, include_polyfill=False, update=False, context=None):
def require(module_name, include_polyfill=True, update=False, context=None):
"""
Installs the provided npm module, exports a js bundle via browserify, converts to ECMA 5.1 via babel and
finally translates the generated JS bundle to Python via Js2Py.
Returns a pure python object that behaves like the installed module. Nice!
:param module_name: Name of the npm module to require. For example 'esprima'.
:param module_name: Name of the npm module to require. For example 'esprima'. Supports specific versions via @
specification. Eg: 'crypto-js@3.3'.
:param include_polyfill: Whether the babel-polyfill should be included as part of the translation. May be needed
for some modules that use unsupported features.
for some modules that use unsupported features of JS6 such as Map or typed arrays.
:param update: Whether to force update the translation. Otherwise uses a cached version if exists.
:param context: Optional context in which the translated module should be executed in. If provided, the
header (js2py imports) will be skipped as it is assumed that the context already has all the necessary imports.
:return: The JsObjectWrapper containing the translated module object. Can be used like a standard python object.
"""
py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=update)
module_name, maybe_version = (module_name+"@@@").split('@')[:2]
py_code = _get_and_translate_npm_module(module_name, include_polyfill=include_polyfill, update=update,
maybe_version_str=maybe_version)
# this is a bit hacky but we need to strip the default header from the generated code...
if context is not None:
if not py_code.startswith(DEFAULT_HEADER):
@@ -141,5 +163,5 @@ def require(module_name, include_polyfill=False, update=False, context=None):
assert py_code.startswith(DEFAULT_HEADER), "Unexpected header."
py_code = py_code[len(DEFAULT_HEADER):]
context = {} if context is None else context
exec (py_code, context)
exec(py_code, context)
return context['var'][_get_module_var_name(module_name)].to_py()
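A quick, runnable sketch of the two mechanics introduced above: splitting an optional @version suffix off the npm module name, and deriving collision-free temp file names from a hash of the name plus a random number. The module name is just an example:

# Mirrors the require()/_get_and_translate_npm_module changes shown above.
import hashlib, random

module_name = 'crypto-js@3.3'
module_name, maybe_version = (module_name + "@@@").split('@')[:2]
print(module_name, maybe_version)        # crypto-js 3.3

module_hash = hashlib.sha1(module_name.encode("utf-8")).hexdigest()[:15]
version = random.randrange(10000000000000)
print('in_%s_%d.js' % (module_hash, version))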

View File

@@ -17,6 +17,11 @@ def replacement_template(rep, source, span, npar):
res += '$'
n += 2
continue
elif rep[n + 1] == '&':
# replace with matched string
res += source[span[0]:span[1]]
n += 2
continue
elif rep[n + 1] == '`':
# replace with string that is BEFORE match
res += source[:span[0]]
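The new branch above adds support for the JS replacement token '$&' (the matched string itself), next to the existing '$`' (text before the match). A small standalone illustration of the same idea using a plain re match; this is not the library function, only a sketch of the token semantics:

# '$&' expands to the match, '$`' to the text before it (JS String.replace rules).
import re

def expand_token(token, source, span):
    if token == '&':
        return source[span[0]:span[1]]   # the matched substring
    if token == '`':
        return source[:span[0]]          # everything before the match
    return '$' + token

m = re.search(r'world', 'hello world!')
print(expand_token('&', m.string, m.span()))   # world
print(expand_token('`', m.string, m.span()))   # 'hello '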

View File

@@ -14,26 +14,36 @@ if six.PY3:
LINE_LEN_LIMIT = 400 # 200 # or any other value - the larger it is, the smaller the probability of errors :)
class ForController:
class LoopController:
def __init__(self):
self.inside = [False]
self.update = ''
self.update = [""]
self.label_to_update_idx = {}
def enter_for(self, update):
self.inside.append(True)
self.update = update
def enter(self, update=""):
self.update.append(update)
def leave_for(self):
self.inside.pop()
def leave(self):
self.update.pop()
def get_update(self, label=None):
if label is None:
return self.update[-1]
if label not in self.label_to_update_idx:
raise SyntaxError("Undefined label %s" % label)
if self.label_to_update_idx[label] >= len(self.update):
raise SyntaxError("%s is not a iteration statement label?" % label)
return self.update[self.label_to_update_idx[label]]
def register_label(self, label):
if label in self.label_to_update_idx:
raise SyntaxError("label %s already used")
self.label_to_update_idx[label] = len(self.update)
def deregister_label(self, label):
del self.label_to_update_idx[label]
def enter_other(self):
self.inside.append(False)
def leave_other(self):
self.inside.pop()
def is_inside(self):
return self.inside[-1]
class InlineStack:
@@ -86,9 +96,10 @@ class ContextStack:
def clean_stacks():
global Context, inline_stack
global Context, inline_stack, loop_controller
Context = ContextStack()
inline_stack = InlineStack()
loop_controller = LoopController()
def to_key(literal_or_identifier):
@@ -108,6 +119,13 @@ def to_key(literal_or_identifier):
else:
return unicode(k)
def is_iteration_statement(cand):
if not isinstance(cand, dict):
# Multiple statements.
return False
return cand.get("type", "?") in {"ForStatement", "ForInStatement", "WhileStatement", "DoWhileStatement"}
def trans(ele, standard=False):
"""Translates esprima syntax tree to python by delegating to appropriate translating node"""
@@ -367,9 +385,14 @@ def BreakStatement(type, label):
def ContinueStatement(type, label):
if label:
return 'raise %s("Continued")\n' % (get_continue_label(label['name']))
maybe_update_expr = loop_controller.get_update(label=label['name'])
continue_stmt = 'raise %s("Continued")\n' % (get_continue_label(label['name']))
else:
return 'continue\n'
maybe_update_expr = loop_controller.get_update()
continue_stmt = "continue\n"
if maybe_update_expr:
return "# continue update\n%s\n%s" % (maybe_update_expr, continue_stmt)
return continue_stmt
def ReturnStatement(type, argument):
@@ -386,24 +409,28 @@ def DebuggerStatement(type):
def DoWhileStatement(type, body, test):
inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n')
loop_controller.enter()
body_code = trans(body)
loop_controller.leave()
inside = body_code + 'if not %s:\n' % trans(test) + indent('break\n')
result = 'while 1:\n' + indent(inside)
return result
def ForStatement(type, init, test, update, body):
update = indent(trans(update)) if update else ''
update = trans(update) if update else ''
init = trans(init) if init else ''
if not init.endswith('\n'):
init += '\n'
test = trans(test) if test else '1'
loop_controller.enter(update)
if not update:
result = '#for JS loop\n%swhile %s:\n%s%s\n' % (
init, test, indent(trans(body)), update)
else:
result = '#for JS loop\n%swhile %s:\n' % (init, test)
body = 'try:\n%sfinally:\n %s\n' % (indent(trans(body)), update)
result += indent(body)
result += indent("%s# update\n%s\n" % (trans(body), update))
loop_controller.leave()
return result
@@ -422,7 +449,9 @@ def ForInStatement(type, left, right, body, each):
name = left['name']
else:
raise RuntimeError('Unusual ForIn loop')
loop_controller.enter()
res += indent('var.put(%s, PyJsTemp)\n' % repr(name) + trans(body))
loop_controller.leave()
return res
@@ -438,20 +467,23 @@ def IfStatement(type, test, consequent, alternate):
def LabeledStatement(type, label, body):
# todo consider using smarter approach!
label_name = label['name']
loop_controller.register_label(label_name)
inside = trans(body)
loop_controller.deregister_label(label_name)
defs = ''
if inside.startswith('while ') or inside.startswith(
'for ') or inside.startswith('#for'):
if is_iteration_statement(body) and (inside.startswith('while ') or inside.startswith(
'for ') or inside.startswith('#for')):
# we have to add continue label as well...
# 3 or 1 since #for loop type has more lines before real for.
sep = 1 if not inside.startswith('#for') else 3
cont_label = get_continue_label(label['name'])
cont_label = get_continue_label(label_name)
temp = inside.split('\n')
injected = 'try:\n' + '\n'.join(temp[sep:])
injected += 'except %s:\n pass\n' % cont_label
inside = '\n'.join(temp[:sep]) + '\n' + indent(injected)
defs += 'class %s(Exception): pass\n' % cont_label
break_label = get_break_label(label['name'])
break_label = get_break_label(label_name)
inside = 'try:\n%sexcept %s:\n pass\n' % (indent(inside), break_label)
defs += 'class %s(Exception): pass\n' % break_label
return defs + inside
@@ -546,7 +578,11 @@ def VariableDeclaration(type, declarations, kind):
def WhileStatement(type, test, body):
result = 'while %s:\n' % trans(test) + indent(trans(body))
test_code = trans(test)
loop_controller.enter()
body_code = trans(body)
loop_controller.leave()
result = 'while %s:\n' % test_code + indent(body_code)
return result
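The reworked ForStatement/ContinueStatement pair above repeats the loop's update expression right before any continue, so the update is no longer skipped. Roughly the Python shape this aims to generate for a JS loop such as for (var i = 0; i < 3; i++) { if (i == 1) continue; log(i); }; this is illustrative only, the real output goes through var.put/var.get wrappers:

# Illustrative translation shape; prints 0 and 2.
i = 0
while i < 3:
    if i == 1:
        # continue update
        i += 1
        continue
    print(i)
    # update
    i += 1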

View File

@@ -55,16 +55,19 @@ def dbg(x):
"""does nothing, legacy dummy function"""
return ''
# Another way of doing that would be with my auto esprima translation but its much slower:
# parsed = esprima.parse(js).to_dict()
def pyjsparser_parse_fn(code):
parser = pyjsparser.PyJsParser()
return parser.parse(code)
def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False):
def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False, parse_fn=pyjsparser_parse_fn):
"""js has to be a javascript source code.
returns equivalent python code."""
if use_compilation_plan and not '//' in js and not '/*' in js:
return translate_js_with_compilation_plan(js, HEADER=HEADER)
parser = pyjsparser.PyJsParser()
parsed = parser.parse(js) # js to esprima syntax tree
# Another way of doing that would be with my auto esprima translation but it's much slower and causes import problems:
# parsed = esprima.parse(js).to_dict()
parsed = parse_fn(js)
translating_nodes.clean_stacks()
return HEADER + translating_nodes.trans(
parsed) # syntax tree to python code
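With the new parse_fn hook, callers can swap the parser without touching the translation step. A sketch of plugging in a custom parse_fn that wraps pyjsparser (the default shown above does the same without the logging); the import path for translate_js is an assumption about how the module is packaged:

import pyjsparser
from js2py.evaljs import translate_js   # assumption about the module path

def logging_parse_fn(code):
    # same as the default, plus a tiny bit of logging
    parser = pyjsparser.PyJsParser()
    tree = parser.parse(code)
    print('parsed %d top-level nodes' % len(tree.get('body', [])))
    return tree

py_src = translate_js('var a = 1 + 2;', parse_fn=logging_parse_fn)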

View File

@@ -26,17 +26,19 @@ def fix_js_args(func):
return func
code = append_arguments(six.get_function_code(func), ('this', 'arguments'))
return types.FunctionType(
result = types.FunctionType(
code,
six.get_function_globals(func),
func.__name__,
closure=six.get_function_closure(func))
return result
def append_arguments(code_obj, new_locals):
co_varnames = code_obj.co_varnames # Old locals
co_names = code_obj.co_names # Old globals
co_names += tuple(e for e in new_locals if e not in co_names)
new_args = tuple(e for e in new_locals if e not in co_names)
co_names += new_args
co_argcount = code_obj.co_argcount # Argument count
co_code = code_obj.co_code # The actual bytecode as a string
@@ -76,26 +78,51 @@ def append_arguments(code_obj, new_locals):
names_to_varnames = dict(
(co_names.index(name), varnames.index(name)) for name in new_locals)
is_new_bytecode = sys.version_info >= (3, 11)
# Now we modify the actual bytecode
modified = []
drop_future_cache = False
for inst in instructions(code_obj):
if is_new_bytecode and inst.opname == "CACHE":
assert inst.arg == 0
if not drop_future_cache:
modified.extend(write_instruction(inst.opcode, inst.arg))
else:
# We need to inject NOOP to not break jumps :(
modified.extend(write_instruction(dis.opmap["NOP"], 0))
continue
op, arg = inst.opcode, inst.arg
# If the instruction is a LOAD_GLOBAL, we have to check to see if
# it's one of the globals that we are replacing. Either way,
# update its arg using the appropriate dict.
drop_future_cache = False
if inst.opcode == LOAD_GLOBAL:
if inst.arg in names_to_varnames:
idx = inst.arg
if is_new_bytecode:
idx = idx // 2
if idx in names_to_varnames:
op = LOAD_FAST
arg = names_to_varnames[inst.arg]
elif inst.arg in name_translations:
arg = name_translations[inst.arg]
arg = names_to_varnames[idx]
# Cache is not present after LOAD_FAST and needs to be removed.
drop_future_cache = True
elif idx in name_translations:
tgt = name_translations[idx]
if is_new_bytecode:
tgt = 2*tgt + (inst.arg % 2)
arg = tgt
else:
raise ValueError("a name was lost in translation")
raise(ValueError("a name was lost in translation last instruction %s" % str(inst)))
# If it accesses co_varnames or co_names then update its argument.
elif inst.opcode in opcode.haslocal:
arg = varname_translations[inst.arg]
elif inst.opcode in opcode.hasname:
# for example STORE_GLOBAL
arg = name_translations[inst.arg]
elif is_new_bytecode and inst.opcode in opcode.hasfree:
# Python 3.11+ adds refs at the end (after locals), for whatever reason...
if inst.argval not in code_obj.co_varnames[:code_obj.co_argcount]: # we do not need to remap existing arguments, they are not shifted by new ones.
arg = inst.arg + len(new_locals)
modified.extend(write_instruction(op, arg))
if six.PY2:
code = ''.join(modified)
@@ -113,23 +140,26 @@ def append_arguments(code_obj, new_locals):
code_obj.co_filename, code_obj.co_name,
code_obj.co_firstlineno, code_obj.co_lnotab,
code_obj.co_freevars, code_obj.co_cellvars)
# Done modifying codestring - make the code object
if hasattr(code_obj, "replace"):
# Python 3.8+
return code_obj.replace(
code_obj = code_obj.replace(
co_argcount=co_argcount + new_locals_len,
co_nlocals=code_obj.co_nlocals + new_locals_len,
co_code=code,
co_names=names,
co_varnames=varnames)
return code_obj
else:
return types.CodeType(*args)
def instructions(code_obj):
# easy for python 3.4+
if sys.version_info >= (3, 4):
def instructions(code_obj, show_cache=True):
if sys.version_info >= (3, 11):
# Python 3.11 introduced "cache instructions", hidden by default.
for inst in dis.Bytecode(code_obj, show_caches=show_cache):
yield inst
elif sys.version_info >= (3, 4): # easy for python 3.4+
for inst in dis.Bytecode(code_obj):
yield inst
else:
@@ -171,7 +201,7 @@ def write_instruction(op, arg):
chr((arg >> 8) & 255)
]
else:
raise ValueError("Invalid oparg: {0} is too large".format(oparg))
raise ValueError("Invalid oparg: {0} is too large".format(arg))
else: # python 3.6+ uses wordcode instead of bytecode and they already supply all the EXTENDEND_ARG ops :)
if arg is None:
return [chr(op), 0]
@@ -191,6 +221,7 @@ def write_instruction(op, arg):
# raise ValueError("Invalid oparg: {0} is too large".format(oparg))
def check(code_obj):
old_bytecode = code_obj.co_code
insts = list(instructions(code_obj))
@@ -221,24 +252,99 @@ def check(code_obj):
'Your python version made changes to the bytecode')
def signature(func):
code_obj = six.get_function_code(func)
return (code_obj.co_nlocals, code_obj.co_argcount, code_obj.co_nlocals, code_obj.co_stacksize,
code_obj.co_flags, code_obj.co_names, code_obj.co_varnames,
code_obj.co_filename,
code_obj.co_freevars, code_obj.co_cellvars)
check(six.get_function_code(check))
def compare_func(fake_func, gt_func):
print(signature(fake_func))
print(signature(gt_func))
assert signature(fake_func) == signature(gt_func)
fake_ins = list(instructions(six.get_function_code(fake_func), show_cache=False))
real_ins = list(instructions(six.get_function_code(gt_func), show_cache=False))
offset = 0
pos = 0
for e in fake_ins:
if e.opname == "NOP":
offset += 1 # ignore NOPs that are inserted in place of old cache.
else:
real = real_ins[pos]
fake = e
print("POS %d OFFSET: %d FAKE VS REAL" % (pos, offset))
print(fake)
print(real)
assert fake.opcode == real.opcode
if fake.opcode in dis.hasjabs or fake.opcode in dis.hasjrel:
pass
else:
assert fake.arg == real.arg
assert fake.argval == real.argval or fake.opname in ["LOAD_CONST"]
assert fake.is_jump_target == real.is_jump_target
pos += 1
assert pos == len(real_ins), (pos, len(real_ins))
print("DONE, looks good.")
if __name__ == '__main__':
x = 'Wrong'
dick = 3000
import faulthandler
def func(a):
print(x, y, z, a)
print(dick)
d = (x, )
for e in (e for e in x):
print(e)
return x, y, z
faulthandler.enable()
func2 = types.FunctionType(
append_arguments(six.get_function_code(func), ('x', 'y', 'z')),
six.get_function_globals(func),
func.__name__,
closure=six.get_function_closure(func))
args = (2, 2, 3, 4), 3, 4
assert func2(1, *args) == args
def func(cmpfn):
if not this.Class in ('Array', 'Arguments'):
return this.to_object() # do nothing
arr = []
for i in xrange(len(this)):
arr.append(this.get(six.text_type(i)))
if not arr:
return this
if not cmpfn.is_callable():
cmpfn = None
cmp = lambda a, b: sort_compare(a, b, cmpfn)
if six.PY3:
key = functools.cmp_to_key(cmp)
arr.sort(key=key)
else:
arr.sort(cmp=cmp)
for i in xrange(len(arr)):
this.put(six.text_type(i), arr[i])
return this
def func_gt(cmpfn, this, arguments):
if not this.Class in ('Array', 'Arguments'):
return this.to_object() # do nothing
arr = []
for i in xrange(len(this)):
arr.append(this.get(six.text_type(i)))
if not arr:
return this
if not cmpfn.is_callable():
cmpfn = None
cmp = lambda a, b: sort_compare(a, b, cmpfn)
if six.PY3:
key = functools.cmp_to_key(cmp)
arr.sort(key=key)
else:
arr.sort(cmp=cmp)
for i in xrange(len(arr)):
this.put(six.text_type(i), arr[i])
return this
func2 = fix_js_args(func)
compare_func(func2, func_gt)
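The CACHE handling above exists because Python 3.11 interleaves hidden cache entries with real instructions; append_arguments has to NOP them out after a LOAD_GLOBAL that becomes a LOAD_FAST. A quick way to see those entries for any function, assuming nothing beyond the standard dis module (show_caches is available on 3.11/3.12):

import dis, sys

def sample(x):
    return x.bit_length()

if sys.version_info >= (3, 11):
    # CACHE rows show up between the real opcodes
    for inst in dis.Bytecode(sample, show_caches=True):
        print(inst.opname, inst.arg)
else:
    dis.dis(sample)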

546
lib/xmltodict.py Normal file
View File

@@ -0,0 +1,546 @@
#!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
try:
from defusedexpat import pyexpat as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
_dict = dict
import platform
if tuple(map(int, platform.python_version_tuple()[:2])) < (3, 7):
from collections import OrderedDict as _dict
from inspect import isgenerator
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.13.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
pass
class _DictSAXHandler(object):
def __init__(self,
item_depth=0,
item_callback=lambda *args: True,
xml_attribs=True,
attr_prefix='@',
cdata_key='#text',
force_cdata=False,
cdata_separator='',
postprocessor=None,
dict_constructor=_dict,
strip_whitespace=True,
namespace_separator=':',
namespaces=None,
force_list=None,
comment_key='#comment'):
self.path = []
self.stack = []
self.data = []
self.item = None
self.item_depth = item_depth
self.xml_attribs = xml_attribs
self.item_callback = item_callback
self.attr_prefix = attr_prefix
self.cdata_key = cdata_key
self.force_cdata = force_cdata
self.cdata_separator = cdata_separator
self.postprocessor = postprocessor
self.dict_constructor = dict_constructor
self.strip_whitespace = strip_whitespace
self.namespace_separator = namespace_separator
self.namespaces = namespaces
self.namespace_declarations = dict_constructor()
self.force_list = force_list
self.comment_key = comment_key
def _build_name(self, full_name):
if self.namespaces is None:
return full_name
i = full_name.rfind(self.namespace_separator)
if i == -1:
return full_name
namespace, name = full_name[:i], full_name[i+1:]
try:
short_namespace = self.namespaces[namespace]
except KeyError:
short_namespace = namespace
if not short_namespace:
return name
else:
return self.namespace_separator.join((short_namespace, name))
def _attrs_to_dict(self, attrs):
if isinstance(attrs, dict):
return attrs
return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
def startNamespaceDecl(self, prefix, uri):
self.namespace_declarations[prefix or ''] = uri
def startElement(self, full_name, attrs):
name = self._build_name(full_name)
attrs = self._attrs_to_dict(attrs)
if attrs and self.namespace_declarations:
attrs['xmlns'] = self.namespace_declarations
self.namespace_declarations = self.dict_constructor()
self.path.append((name, attrs or None))
if len(self.path) >= self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
attr_entries = []
for key, value in attrs.items():
key = self.attr_prefix+self._build_name(key)
if self.postprocessor:
entry = self.postprocessor(self.path, key, value)
else:
entry = (key, value)
if entry:
attr_entries.append(entry)
attrs = self.dict_constructor(attr_entries)
else:
attrs = None
self.item = attrs or None
self.data = []
def endElement(self, full_name):
name = self._build_name(full_name)
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = (None if not self.data
else self.cdata_separator.join(self.data))
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted()
if self.stack:
data = (None if not self.data
else self.cdata_separator.join(self.data))
item = self.item
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data and item:
data = data.strip() or None
if data and self.force_cdata and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = None
self.data = []
self.path.pop()
def characters(self, data):
if not self.data:
self.data = [data]
else:
self.data.append(data)
def comments(self, data):
if self.strip_whitespace:
data = data.strip()
self.item = self.push_data(self.item, self.comment_key, data)
def push_data(self, item, key, data):
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
if self._should_force_list(key, data):
item[key] = [data]
else:
item[key] = data
return item
def _should_force_list(self, key, value):
if not self.force_list:
return False
if isinstance(self.force_list, bool):
return self.force_list
try:
return key in self.force_list
except TypeError:
return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
namespace_separator=':', disable_entities=True, process_comments=False, **kwargs):
"""Parse the given XML input and convert it into a dictionary.
`xml_input` can either be a `string`, a file-like object, or a generator of strings.
If `xml_attribs` is `True`, element attributes are put in the dictionary
among regular child elements, using `@` as a prefix to avoid collisions. If
set to `False`, they are just ignored.
Simple example::
>>> import xmltodict
>>> doc = xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>
... \"\"\")
>>> doc['a']['@prop']
u'x'
>>> doc['a']['b']
[u'1', u'2']
If `item_depth` is `0`, the function returns a dictionary for the root
element (default behavior). Otherwise, it calls `item_callback` every time
an item at the specified depth is found and returns `None` in the end
(streaming mode).
The callback function receives two parameters: the `path` from the document
root to the item (name-attribs pairs), and the `item` (dict). If the
callback's return value is false-ish, parsing will be stopped with the
:class:`ParsingInterrupted` exception.
Streaming example::
>>> def handle(path, item):
... print('path:%s item:%s' % (path, item))
... return True
...
>>> xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>\"\"\", item_depth=2, item_callback=handle)
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2
The optional argument `postprocessor` is a function that takes `path`,
`key` and `value` as positional arguments and returns a new `(key, value)`
pair where both `key` and `value` may have changed. Usage example::
>>> def postprocessor(path, key, value):
... try:
... return key + ':int', int(value)
... except (ValueError, TypeError):
... return key, value
>>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
... postprocessor=postprocessor)
{'a': {'b:int': [1, 2], 'b': 'x'}}
You can pass an alternate version of `expat` (such as `defusedexpat`) by
using the `expat` parameter. E.g:
>>> import defusedexpat
>>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
{'a': 'hello'}
You can use the force_list argument to force lists to be created even
when there is only a single child of a given level of hierarchy. The
force_list argument is a tuple of keys. If the key for a given level
of hierarchy is in the force_list argument, that level of hierarchy
will have a list as a child (even if there is only one sub-element).
The index_keys operation takes precedence over this. This is applied
after any user-supplied postprocessor has already run.
For example, given this input:
<servers>
<server>
<name>host1</name>
<os>Linux</os>
<interfaces>
<interface>
<name>em0</name>
<ip_address>10.0.0.1</ip_address>
</interface>
</interfaces>
</server>
</servers>
If called with force_list=('interface',), it will produce
this dictionary:
{'servers':
{'server':
{'name': 'host1',
'os': 'Linux'},
'interfaces':
{'interface':
[ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } }
`force_list` can also be a callable that receives `path`, `key` and
`value`. This is helpful in cases where the logic that decides whether
a list should be forced is more complex.
If `process_comments` is `True` then comments will be added with `comment_key`
(default=`'#comment'`) to the tag which contains the comment.
For example, given this input:
<a>
<b>
<!-- b comment -->
<c>
<!-- c comment -->
1
</c>
<d>2</d>
</b>
</a>
If called with process_comments=True, it will produce
this dictionary:
'a': {
'b': {
'#comment': 'b comment',
'c': {
'#comment': 'c comment',
'#text': '1',
},
'd': '2',
},
}
"""
handler = _DictSAXHandler(namespace_separator=namespace_separator,
**kwargs)
if isinstance(xml_input, _unicode):
if not encoding:
encoding = 'utf-8'
xml_input = xml_input.encode(encoding)
if not process_namespaces:
namespace_separator = None
parser = expat.ParserCreate(
encoding,
namespace_separator
)
try:
parser.ordered_attributes = True
except AttributeError:
# Jython's expat does not support ordered_attributes
pass
parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
if process_comments:
parser.CommentHandler = handler.comments
parser.buffer_text = True
if disable_entities:
try:
# Attempt to disable DTD in Jython's expat parser (Xerces-J).
feature = "http://apache.org/xml/features/disallow-doctype-decl"
parser._reader.setFeature(feature, True)
except AttributeError:
# For CPython / expat parser.
# Anything not handled ends up here and entities aren't expanded.
parser.DefaultHandler = lambda x: None
# Expects an integer return; zero means failure -> expat.ExpatError.
parser.ExternalEntityRefHandler = lambda *x: 1
if hasattr(xml_input, 'read'):
parser.ParseFile(xml_input)
elif isgenerator(xml_input):
for chunk in xml_input:
parser.Parse(chunk, False)
parser.Parse(b'', True)
else:
parser.Parse(xml_input, True)
return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{}{}{}{}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='#text',
depth=0,
preprocessor=None,
pretty=False,
newl='\n',
indent='\t',
namespace_separator=':',
namespaces=None,
full_document=True,
expand_iter=None):
key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
if (not hasattr(value, '__iter__')
or isinstance(value, _basestring)
or isinstance(value, dict)):
value = [value]
for index, v in enumerate(value):
if full_document and depth == 0 and index > 0:
raise ValueError('document with multiple roots')
if v is None:
v = _dict()
elif isinstance(v, bool):
if v:
v = _unicode('true')
else:
v = _unicode('false')
elif not isinstance(v, dict):
if expand_iter and hasattr(v, '__iter__') and not isinstance(v, _basestring):
v = _dict(((expand_iter, v),))
else:
v = _unicode(v)
if isinstance(v, _basestring):
v = _dict(((cdata_key, v),))
cdata = None
attrs = _dict()
children = []
for ik, iv in v.items():
if ik == cdata_key:
cdata = iv
continue
if ik.startswith(attr_prefix):
ik = _process_namespace(ik, namespaces, namespace_separator,
attr_prefix)
if ik == '@xmlns' and isinstance(iv, dict):
for k, v in iv.items():
attr = 'xmlns{}'.format(':{}'.format(k) if k else '')
attrs[attr] = _unicode(v)
continue
if not isinstance(iv, _unicode):
iv = _unicode(iv)
attrs[ik[len(attr_prefix):]] = iv
continue
children.append((ik, iv))
if type(indent) is int:
indent = ' ' * indent
if pretty:
content_handler.ignorableWhitespace(depth * indent)
content_handler.startElement(key, AttributesImpl(attrs))
if pretty and children:
content_handler.ignorableWhitespace(newl)
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, depth+1, preprocessor,
pretty, newl, indent, namespaces=namespaces,
namespace_separator=namespace_separator,
expand_iter=expand_iter)
if cdata is not None:
content_handler.characters(cdata)
if pretty and children:
content_handler.ignorableWhitespace(depth * indent)
content_handler.endElement(key)
if pretty and depth:
content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
short_empty_elements=False,
**kwargs):
"""Emit an XML document for the given `input_dict` (reverse of `parse`).
The resulting XML document is returned as a string, but if `output` (a
file-like object) is specified, it is written there instead.
Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
as XML node attributes, whereas keys equal to `cdata_key`
(default=`'#text'`) are treated as character data.
The `pretty` parameter (default=`False`) enables pretty-printing. In this
mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
can be customized with the `newl` and `indent` parameters.
"""
if full_document and len(input_dict) != 1:
raise ValueError('Document must have exactly one root.')
must_return = False
if output is None:
output = StringIO()
must_return = True
if short_empty_elements:
content_handler = XMLGenerator(output, encoding, True)
else:
content_handler = XMLGenerator(output, encoding)
if full_document:
content_handler.startDocument()
for key, value in input_dict.items():
_emit(key, value, content_handler, full_document=full_document,
**kwargs)
if full_document:
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value
if __name__ == '__main__': # pragma: no cover
import sys
import marshal
try:
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
except AttributeError:
stdin = sys.stdin
stdout = sys.stdout
(item_depth,) = sys.argv[1:]
item_depth = int(item_depth)
def handle_item(path, item):
marshal.dump((path, item), stdout)
return True
try:
root = parse(stdin,
item_depth=item_depth,
item_callback=handle_item,
dict_constructor=dict)
if item_depth == 0:
handle_item([], root)
except KeyboardInterrupt:
pass
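The bundled module follows the upstream xmltodict API, so the usual parse/unparse round-trip applies. A short usage sketch, assuming the file is importable as xmltodict (inside the addon it lives under lib/); the XML content is made up:

import xmltodict  # assumption: lib/ is on the path

xml = """
<servers>
  <server>
    <name>host1</name>
    <interfaces>
      <interface><name>em0</name></interface>
    </interfaces>
  </server>
</servers>
"""

# force_list keeps 'interface' a list even with a single child
doc = xmltodict.parse(xml, force_list=('interface',))
print(doc['servers']['server']['interfaces']['interface'][0]['name'])   # em0
print(xmltodict.unparse(doc, pretty=True))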

View File

@@ -1107,7 +1107,7 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
# logger.error('Failed to resolve hostname, fallback to normal dns')
from core import support
# support.dbg()
if '|' not in mediaurl and item.referer != False:
if '|' not in mediaurl and item.referer != False and 'youtube' not in mediaurl:
mediaurl = mediaurl + '|' + urllib.urlencode(headers)
# video information is obtained.
@@ -1618,6 +1618,8 @@ def play_torrent(item, xlistitem, mediaurl):
xbmc.sleep(3000)
xbmc.executebuiltin("PlayMedia(" + torrent_options[selection][1] % mediaurl + ")")
def resume_playback(played_time):
class ResumePlayback(xbmcgui.WindowXMLDialog):
Close = False
@@ -1651,14 +1653,26 @@ def resume_playback(played_time):
self.set_values(False)
self.close()
if played_time and played_time > 30:
Dialog = ResumePlayback('ResumePlayback.xml', config.get_runtime_path(), played_time=played_time)
Dialog.show()
t = 0
while not Dialog.is_close() and t < 100:
t += 1
xbmc.sleep(100)
if not Dialog.Resume: played_time = 0
if config.get_setting('resume_menu') == 0:
Dialog = ResumePlayback('ResumePlayback.xml', config.get_runtime_path(), played_time=played_time)
Dialog.show()
t = 0
while not Dialog.is_close() and t < 100:
t += 1
xbmc.sleep(100)
if not Dialog.Resume: played_time = 0
else:
m, s = divmod(played_time, 60)
h, m = divmod(m, 60)
idx = xbmcgui.Dialog().contextmenu(
[
xbmc.getLocalizedString(12022).format('%02d:%02d:%02d' % (h, m, s)),
xbmc.getLocalizedString(12021)
])
if idx in [-1, 0]: played_time = 0
else: played_time = 0
xbmc.sleep(300)
return played_time
@@ -2031,4 +2045,4 @@ def serverWindow(item, itemlist, runDirectly=True):
from platformcode.launcher import run
run(selection)
else:
return selection
return selection
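The skin-window branch above formats the saved position for the context menu with a chain of divmod calls. In isolation, the same arithmetic:

# seconds -> HH:MM:SS for the menu label
played_time = 3725
m, s = divmod(played_time, 60)
h, m = divmod(m, 60)
print('%02d:%02d:%02d' % (h, m, s))   # 01:02:05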

View File

@@ -617,7 +617,10 @@ def set_content(content_type, silent=False, custom=False):
values.append(r['addonid'])
if not custom:
if content_type == 'movie':
seleccion = values.index('metadata.themoviedb.org')
if PY3:
seleccion = values.index('metadata.themoviedb.org.python')
else:
seleccion = values.index('metadata.themoviedb.org')
else:
seleccion = values.index('metadata.tvshows.themoviedb.org.python')
else:

View File

@@ -199,6 +199,18 @@ msgctxt "#30046"
msgid "Resume from start"
msgstr ""
msgctxt "#30047"
msgid "Show resume from How:"
msgstr "Mostra riprendi da come:"
msgctxt "#30048"
msgid "Custom window"
msgstr "Finestra personalizzata"
msgctxt "#30049"
msgid "Skin window"
msgstr "Finestra della skin"
msgctxt "#30050"
msgid "Server connection error"
msgstr ""

View File

@@ -199,6 +199,18 @@ msgctxt "#30046"
msgid "Resume from start"
msgstr "Riprendi dall'inizio"
msgctxt "#30047"
msgid "Show resume from How:"
msgstr "Mostra riprendi da come:"
msgctxt "#30048"
msgid "Custom window"
msgstr "Finestra personalizzata"
msgctxt "#30049"
msgid "Skin window"
msgstr "Finestra della skin"
msgctxt "#30050"
msgid "Server connection error"
msgstr "Errore connessione server"

View File

@@ -24,6 +24,7 @@
<setting id="servers_favorites" visible="true" type="action" label="60551" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAic2VydmVyc19mYXZvcml0ZXMiLA0KICAgICJjaGFubmVsIjogInNldHRpbmciDQp9==)"/>
<setting id="servers_blacklist" visible="true" type="action" label="60550" action="RunPlugin(plugin://plugin.video.kod/?ew0KICAgICJhY3Rpb24iOiAic2VydmVyc19ibGFja2xpc3QiLA0KICAgICJjaGFubmVsIjogInNldHRpbmciDQp9==)"/>
<setting id="window_type" type="select" lvalues="60622|60623" label="60621" default="0"/>
<setting id="resume_menu" type="select" lvalues="30048|30049" label="30047" default="0"/>
<!-- <setting id="hide_servers" type="bool" label="70747" default="false" visible="eq(-1,true)" subsetting="true"/> -->
<setting id="checklinks" type="bool" label="30020" default="false"/>
<setting id="checklinks_number" type="slider" option="int" range="5,5,20" label="30021" default="5" visible="eq(-1,true)" subsetting="true"/>

View File

@@ -4,8 +4,8 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "dood(?:stream)?.[^/]+/+(?:e|d)/([a-z0-9]+)",
"url": "https://dood.yt/e/\\1"
"pattern": "(do*d(?:stream)?.[^/]+)/+(?:e|d)/([a-z0-9]+)",
"url": "https://\\1/e/\\2"
}
]
},
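The widened pattern now captures the DooD mirror that was actually used and reuses it in the rebuilt URL instead of forcing dood.yt. A small check with the pattern and template copied from the JSON above; the domain is made up:

import re

pattern = r'(do*d(?:stream)?.[^/]+)/+(?:e|d)/([a-z0-9]+)'
m = re.search(pattern, 'https://dooood.example/d/abcd1234')
if m:
    print('https://{}/e/{}'.format(m.group(1), m.group(2)))
    # https://dooood.example/e/abcd1234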

View File

@@ -1,15 +1,20 @@
# -*- coding: utf-8 -*-
import time, string, random
from core import httptools, support, servertools
import time, string, random, sys
from core import httptools, support
from platformcode import logger, config
if sys.version_info[0] >= 3:
from urllib.parse import urlparse
else:
from urllib import urlparse
def test_video_exists(page_url):
global data
logger.debug('page url=', page_url)
response = httptools.downloadpage(page_url)
response = httptools.downloadpage(page_url, cloudscraper=True)
if response.code == 404 or 'dsplayer' not in response.data:
return False, config.get_localized_string(70449) % 'DooD Stream'
else:
@@ -22,14 +27,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("URL", page_url)
video_urls = []
host = 'https://' + servertools.get_server_host('doodstream')[0]
headers = {'User-Agent': httptools.get_user_agent(), 'Referer': page_url}
# support.dbg()
host = 'https://{}'.format(urlparse(page_url).netloc)
headers = {'User-Agent': httptools.get_user_agent(), 'Referer': host}
match = support.match(data, patron=r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)''').match
if match:
url, token = match
ret = httptools.downloadpage(host + url, headers=headers).data
ret = httptools.downloadpage(host + url, headers=headers, cloudscraper=True).data
video_urls.append(['mp4 [DooD Stream]', '{}{}{}{}|Referer={}'.format(randomize(ret), url, token, int(time.time() * 1000), host)])
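randomize() is not part of this hunk, so the sketch below is an assumption rather than the actual helper: a short random alphanumeric tail appended to the URL returned by the player endpoint, before the token and millisecond timestamp are concatenated as in the format() call above.

# Hypothetical sketch of randomize(); not taken from the source.
import random, string

def randomize(base_url, length=10):
    tail = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
    return base_url + tail

print(randomize('https://host.example/abc123~'))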

View File

@@ -20,5 +20,5 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
packed = support.match(data, patron=r'(eval\(function\(p.*?)</').match
if packed:
data = jsunpack.unpack(packed).replace("\\", "")
video_urls = support.get_jwplayer_mediaurl(data, 'filemoon')
video_urls = support.get_jwplayer_mediaurl(data, 'filemoon', hls=True)
return video_urls

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "mixdro?ps?.[^/]+/(?:f|e)/([a-z0-9]+)",
"pattern": "mixdro?o?ps?.[^/]+/(?:f|e)/([a-z0-9]+)",
"url": "https://mixdrop.co/e/\\1"
},
{

27
servers/streamhide.json Normal file
View File

@@ -0,0 +1,27 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "ahvsh.com/[a-z]/([\\d\\w]+)",
"url": "https://ahvsh.com/e/\\1"
}
]
},
"free": true,
"id": "streamhide",
"name": "StreamHide",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@70708",
"type": "bool",
"visible": true
}
],
"cloudflare": true
}

27
servers/streamhide.py Normal file
View File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
##
from core import httptools, support
from core import scrapertools
from platformcode import logger, config
def test_video_exists(page_url):
logger.debug("(page_url='%s')" % page_url)
global data
data = httptools.downloadpage(page_url).data
if "File is no longer available" in data:
return False, config.get_localized_string(70449) % "StreamHide"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.debug("url=" + page_url)
global data
return support.get_jwplayer_mediaurl(data, 'StreamHide', hls=True)

View File

@@ -1,68 +1,77 @@
# -*- coding: utf-8 -*-
import urllib.parse
import ast
import xbmc
from core import httptools, support, filetools
from platformcode import logger, config
UA = httptools.random_useragent()
from concurrent import futures
from urllib.parse import urlparse
vttsupport = False if int(xbmc.getInfoLabel('System.BuildVersion').split('.')[0]) < 20 else True
def test_video_exists(page_url):
global scws_id
logger.debug('page url=', page_url)
scws_id = ''
global iframe
global iframeParams
if page_url.isdigit():
scws_id = page_url
else:
page = httptools.downloadpage(page_url)
if page.url == page_url: # if it does not exist, it redirects to the last existing url that was called
scws_id = support.scrapertools.find_single_match(page.data, r'scws_id[^:]+:(\d+)')
else:
return 'StreamingCommunity', 'Prossimamente'
iframe = support.scrapertools.decodeHtmlentities(support.match(page_url, patron='<iframe [^>]+src="([^"]+)').match)
iframeParams = support.match(iframe, patron='window\.masterPlaylistParams\s=\s({.*?})').match
if not iframeParams:
return 'StreamingCommunity', 'Prossimamente'
if not scws_id:
return False, config.get_localized_string(70449) % 'StreamingCommunityWS'
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
from time import time
from base64 import b64encode
from hashlib import md5
global scws_id
urls = list()
subs = list()
local_subs = list()
video_urls = list()
# clientIp = httptools.downloadpage(f'https://scws.work/videos/{scws_id}').json.get('client_ip')
clientIp = httptools.downloadpage('http://ip-api.com/json/').json.get('query')
if clientIp:
expires = int(time() + 172800)
token = b64encode(md5('{}{} Yc8U6r8KjAKAepEA'.format(expires, clientIp).encode('utf-8')).digest()).decode('utf-8').replace('=', '').replace('+', '-').replace('/', '_')
url = 'https://scws.work/master/{}?token={}&expires={}&n=1'.format(scws_id, token, expires)
if page_url.isdigit():
video_urls.append(['m3u8', '{}|User-Agent={}'.format(url, UA)])
else:
video_urls = compose(url)
scws_id = urlparse(iframe).path.split('/')[-1]
masterPlaylistParams = ast.literal_eval(iframeParams)
url = 'https://scws.work/v2/playlist/{}?{}&n=1'.format(scws_id, urllib.parse.urlencode(masterPlaylistParams))
return video_urls
info = support.match(url, patron=r'LANGUAGE="([^"]+)",\s*URI="([^"]+)|(http.*?rendition=(\d+)[^\s]+)').matches
def compose(url):
subs = []
video_urls = []
info = support.match(url, patron=r'LANGUAGE="([^"]+)",\s*URI="([^"]+)|RESOLUTION=\d+x(\d+).*?(http[^"\s]+)', headers={'User-Agent':UA}).matches
if info and not logger.testMode: # the tests do not like this part
for lang, sub, res, url in info:
if sub:
while True:
match = support.match(sub, patron=r'(http[^\s\n]+)').match
if match:
sub = httptools.downloadpage(match).data
else:
break
if info:
for lang, sub, url, res in info:
if sub:
if lang == 'auto': lang = 'ita-forced'
s = config.get_temp_file(lang +'.srt')
subs.append(s)
filetools.write(s, support.vttToSrt(sub))
elif url:
video_urls.append(['m3u8 [{}]'.format(res), '{}|User-Agent={}'.format(url, UA), 0, subs])
subs.append([lang, sub])
elif not 'token=&' in url:
urls.append([res, url])
if subs:
local_subs = subs_downloader(subs)
video_urls = [['m3u8 [{}]'.format(res), url, 0, local_subs] for res, url in urls]
else:
video_urls = [['m3u8 [{}]'.format(res), url] for res, url in urls]
else:
video_urls.append(['m3u8', '{}|User-Agent={}'.format(url, UA)])
video_urls = [['hls', url]]
return video_urls
def subs_downloader(subs):
def subs_downloader_thread(n, s):
lang, url = s
match = support.match(url, patron=r'(http[^\s\n]+)').match
if match:
data = httptools.downloadpage(match).data
if lang == 'auto': lang = 'ita-forced'
sub = config.get_temp_file('{}.{}'.format(lang, 'vtt' if vttsupport else 'srt'))
filetools.write(sub, data if vttsupport else support.vttToSrt(data))
return n, sub
local_subs = list()
with futures.ThreadPoolExecutor() as executor:
itlist = [executor.submit(subs_downloader_thread, n, s) for n, s in enumerate(subs)]
for res in futures.as_completed(itlist):
if res.result():
local_subs.append(res.result())
return [s[1] for s in sorted(local_subs, key=lambda n: n[0])]

View File

@@ -3,16 +3,16 @@
"find_videos": {
"ignore_urls": [],
"patterns": [{
"pattern": "(?:streamsb|sblanh|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill|sbhight|sbbrisk)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
"url": "https://streamas.cloud/e/\\1.html"
"pattern": "(?:streamsb|sblanh|sblona|sbembed|sbembed1|sbplay1|sbplay|pelistop|tubesb|playersb|embedsb|watchsb|streamas|sbfast|sbfull|viewsb|sbvideo|cloudemb|sbplay2|japopav|javplaya|ssbstream|sbthe|sbspeed|sbanh|sblongvu|sbchill|sbhight|sbbrisk)\\.\\w{2,5}/(?:embed-|d/|e/)?([A-z0-9]+)",
"url": "https://streamas.cloud/d/\\1.html"
},
{
"pattern": "(?:cloudemb.com)/([A-z0-9]+)",
"url": "https://streamas.cloud/e/\\1.html"
"url": "https://streamas.cloud/d/\\1.html"
},
{
"pattern": "animeworld.biz/(?:embed-|d/|e/)?([A-z0-9]+)",
"url": "https://streamas.cloud/e/\\1.html"
"url": "https://streamas.cloud/d/\\1.html"
}
]
},

View File

@@ -1,53 +1,81 @@
from core import httptools
import re
from core import httptools, support, scrapertools
from platformcode import config, logger, platformtools
import random, string
import codecs
try:
import urllib.parse as urllib
except ImportError:
import urllib
import re, sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
from base64 import b64encode
host = 'https://streamas.cloud'
def get_sources(page_url):
code = page_url.split('/')[-1].split('.html')[0]
rand1 = "".join([random.choice(string.ascii_letters) for y in range(12)])
rand2 = "".join([random.choice(string.ascii_letters) for y in range(12)])
_0x470d0b = '{}||{}||{}||streamsb'.format(rand1, code, rand2)
prefix = 'https://streamas.cloud/sources'
suffix = '/' + codecs.getencoder('hex')(_0x470d0b.encode())[0].decode()
number = config.get_setting('number', server='streamsb')
sources = prefix + str(number) + suffix
# does not lite other headers different than watchsb and useragent
ret = httptools.downloadpage(sources, headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()}, replace_headers=True).json
if not ret: # probably number changed
wait = platformtools.dialog_progress('StreamSB', config.get_localized_string(60293))
for number in range(100):
if httptools.downloadpage(prefix + str(number) + '/').code == 200:
config.set_setting('number', server='streamsb', value=number)
sources = prefix + str(number) + suffix
# does not lite other headers different than watchsb and useragent
ret = httptools.downloadpage(sources,
headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()},
replace_headers=True).json
break
wait.close()
logger.debug(ret)
return ret
sources = support.match(page_url, headers={'watchsb': 'sbstream', 'User-Agent': httptools.get_user_agent()}, replace_headers=True, patron=r'download_video([^"]+).*?<span>\s*(\d+)').matches
if sources:
sources = {s[1]: s[0].replace('(','').replace(')','').replace("'",'').split(',') for s in sources}
return sources
def test_video_exists(page_url):
global sources
sources = get_sources(page_url)
if 'error' in sources:
return False, config.get_localized_string(70449) % "StreamSB"
else:
if sources:
return True, ""
else:
return False, config.get_localized_string(70449) % "StreamSB"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
global sources
file = sources['stream_data']['file']
backup = sources['stream_data']['backup']
return [["m3u8 [StreamSB]", file], ["m3u8-altern [StreamSB]", backup]]
video_urls = list()
if sources:
action = config.get_setting('default_action')
if action == 0:
progress = platformtools.dialog_progress_bg("StreamSB", message="Risoluzione URLs")
step = int(100 / len(sources))
percent = 0
for res, url in sources.items():
progress.update(percent, "Risoluzione URL: {}p".format(res))
r, u = resolve_url(res, url)
percent += step
progress.update(percent, "Risoluzione URL: {}p".format(res))
video_urls.append(['{} [{}]'.format(u.split('.')[-1], r), u])
progress.close()
else:
res = sorted([*sources], key=int)[0 if action == 1 else -1]
progress = platformtools.dialog_progress_bg("StreamSB", message="Risoluzione URL: {}p".format(res))
url = sources[res]
r, u = resolve_url(res, url)
progress.close()
video_urls.append(['{} [{}]'.format(u.split('.')[-1], r), u])
return video_urls
def get_filename(page_url):
return get_sources(page_url)['stream_data']['title']
def get_payloads(data, token):
# support.dbg()
payloads = {'g-recaptcha-response': token}
for name, value in support.match(data, patron=r'input type="hidden" name="([^"]+)" value="([^"]+)').matches:
payloads[name] = value
return payloads
def resolve_url(res, params):
url = ''
source_url = '{}/dl?op=download_orig&id={}&mode={}&hash={}'.format(host, params[0], params[1], params[2])
data = httptools.downloadpage(source_url).data
co = b64encode((host + ':443').encode('utf-8')).decode('utf-8').replace('=', '')
token = scrapertools.girc(data, host, co)
payload = get_payloads(data, token)
if token:
url = support.match(source_url, patron=r'href="([^"]+)"\s*class="btn\s*btn-light', post=payload).match
return res, url

View File

@@ -21,7 +21,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.debug("(page_url='%s')" % page_url)
video_urls = []
video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-z0-9_-]{11})')
inputstream = platformtools.install_inputstream()
try:

View File

@@ -186,7 +186,7 @@ def peliculas(item, json='', key='', itemlist=[]):
itlist = filterkey = []
action = 'findvideos'
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
if inspect.stack(0)[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
'search'] and not item.filterkey and not item.disable_pagination:
Pagination = int(defp) if defp.isdigit() else ''
else:
@@ -243,7 +243,7 @@ def peliculas(item, json='', key='', itemlist=[]):
# if item.sort:
# itemlist.sort(key=lambda x: x.title.lower(), reverse=False)
if Pagination and len(itemlist) >= Pagination:
if inspect.stack()[1][3] != 'get_newest':
if inspect.stack(0)[1][3] != 'get_newest':
item.title = support.typo(config.get_localized_string(30992), 'color kod bold')
item.page = pag + 1
item.thumbnail = support.thumb()
@@ -279,13 +279,13 @@ def get_seasons(item):
contentType='season' if show_seasons else 'tvshow',
path=extra.path))
if inspect.stack()[2][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
if inspect.stack(0)[2][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
'get_newest'] or show_seasons == False:
itlist = []
for item in itemlist:
itlist = episodios(item)
itemlist = itlist
if inspect.stack()[2][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
if inspect.stack(0)[2][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
'get_newest'] and defp and not item.disable_pagination:
itemlist = pagination(item, itemlist)
@@ -322,7 +322,7 @@ def episodios(item, json='', key='', itemlist=[]):
ep = 1
season = infoLabels['season'] if 'season' in infoLabels else item.contentSeason if item.contentSeason else 1
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
if inspect.stack(0)[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes',
'search'] and not show_seasons:
Pagination = int(defp) if defp.isdigit() else ''
else:
@@ -374,7 +374,7 @@ def episodios(item, json='', key='', itemlist=[]):
path=item.path))
# if showseason
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest', 'search']:
if inspect.stack(0)[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'get_newest', 'search']:
if show_seasons and not item.filterseason:
itm.contentType = 'season'
season_list = []
@@ -399,15 +399,15 @@ def episodios(item, json='', key='', itemlist=[]):
support.videolibrary(itemlist, item)
support.download(itemlist, item)
elif defp and inspect.stack()[1][3] not in ['get_seasons'] and not item.disable_pagination:
elif defp and inspect.stack(0)[1][3] not in ['get_seasons'] and not item.disable_pagination:
if Pagination and len(itemlist) >= Pagination:
if inspect.stack()[1][3] != 'get_newest':
if inspect.stack(0)[1][3] != 'get_newest':
item.title = support.typo(config.get_localized_string(30992), 'color kod bold')
item.page = pag + 1
item.thumbnail = support.thumb()
itemlist.append(item)
if inspect.stack()[1][3] not in ['get_seasons'] and not show_seasons:
if inspect.stack(0)[1][3] not in ['get_seasons'] and not show_seasons:
support.videolibrary(itemlist, item)
support.download(itemlist, item)
return itemlist
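The switch from inspect.stack() to inspect.stack(0) throughout this file keeps the same caller-name lookup while passing context=0, so no source lines are read from disk, which is considerably cheaper on every call. A minimal illustration of the lookup itself:

import inspect

def caller_name():
    # stack(0) returns frame records without source context; [1][3] is the caller's name
    return inspect.stack(0)[1][3]

def get_newest():
    return caller_name()

print(get_newest())   # get_newest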
@@ -769,7 +769,7 @@ def set_extra_values(item, json, path):
ret.subtitle = json[key]
if not ret.thumb:
if 'get_search_menu' in inspect.stack()[1][3]:
if 'get_search_menu' in inspect.stack(0)[1][3]:
ret.thumb = get_thumb('search.png')
else:
ret.thumb = item.thumbnail
@@ -830,7 +830,7 @@ def pagination(item, itemlist=[]):
encoded_itemlist = []
for it in itemlist:
encoded_itemlist.append(it.tourl())
if inspect.stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'search']:
if inspect.stack(0)[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes', 'search']:
Pagination = int(defp) if defp.isdigit() else ''
else:
Pagination = ''
@@ -843,7 +843,7 @@ def pagination(item, itemlist=[]):
itlist.append(item)
if Pagination and len(itemlist) >= Pagination:
if inspect.stack()[1][3] != 'get_newest':
if inspect.stack(0)[1][3] != 'get_newest':
itlist.append(
Item(channel=item.channel,
action='pagination',

View File

@@ -416,7 +416,6 @@ class SearchWindow(xbmcgui.WindowXML):
self.count += 1
return channel, valid, other if other else results
self.update(channel, valid, other if other else results)
# update_lock.release()
def makeItem(self, url):
@@ -440,6 +439,7 @@ class SearchWindow(xbmcgui.WindowXML):
return it
def update(self, channel, valid, results):
update_lock.acquire()
self.LOADING.setVisible(False)
if self.exit:
return
@@ -462,7 +462,8 @@ class SearchWindow(xbmcgui.WindowXML):
for result in valid:
resultsList += result.tourl() + '|'
item.setProperty('items', resultsList)
self.channels[0].setProperty('results', str(len(resultsList.split('|')) - 1 ))
res = len(resultsList.split('|'))
self.channels[0].setProperty('results', str(res - 1 if res > 0 else 0))
if self.CHANNELS.getSelectedPosition() == 0:
items = []
@@ -487,7 +488,7 @@ class SearchWindow(xbmcgui.WindowXML):
})
for result in results:
resultsList += result.tourl() + '|'
item.setProperty('items',resultsList)
item.setProperty('items', resultsList)
self.results[name] = len(self.results)
self.channels.append(item)
else:
@@ -497,7 +498,8 @@ class SearchWindow(xbmcgui.WindowXML):
resultsList += result.tourl() + '|'
item.setProperty('items',resultsList)
logger.log(self.channels[int(self.results[name])])
self.channels[int(self.results[name])].setProperty('results', str(len(resultsList.split('|')) - 1))
res = len(resultsList.split('|'))
self.channels[int(self.results[name])].setProperty('results', str(res - 1 if res > 0 else 0))
pos = self.CHANNELS.getSelectedPosition()
self.CHANNELS.reset()
self.CHANNELS.addItems(self.channels)
@@ -511,6 +513,7 @@ class SearchWindow(xbmcgui.WindowXML):
if result: items.append(self.makeItem(result))
self.RESULTS.reset()
self.RESULTS.addItems(items)
update_lock.release()
def onInit(self):
self.time = time.time()
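Since update() now holds update_lock for its whole body, any early return needs to leave the lock released as well. A minimal sketch of the usual try/finally guard for that shape, assuming update_lock is a plain threading.Lock; this is a general pattern, not the addon's code:

import threading

update_lock = threading.Lock()

def update_sketch(exit_requested):
    update_lock.acquire()
    try:
        if exit_requested:
            return            # the finally block still releases the lock
        # ... rebuild the channel and result lists here ...
    finally:
        update_lock.release()

update_sketch(True)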
@@ -689,6 +692,7 @@ class SearchWindow(xbmcgui.WindowXML):
self.itemsResult = getattr(self.channel, item.action)(item)
if self.itemsResult and self.itemsResult[0].server:
from platformcode.launcher import findvideos
busy(False)
findvideos(self.item, self.itemsResult)
return
except:

View File

@@ -3,10 +3,10 @@ rm tests/home/userdata/addon_data/plugin.video.kod/settings_channels/*.json
rm tests/home/userdata/addon_data/plugin.video.kod/settings_servers/*.json
rm tests/home/userdata/addon_data/plugin.video.kod/cookies.dat
rm tests/home/userdata/addon_data/plugin.video.kod/kod_db.sqlite
python3.9 -m pip install --upgrade pip
pip3.9 install -U sakee
pip3.9 install -U html-testRunner
pip3.9 install -U parameterized
python -m pip install --upgrade pip
pip install -U sakee
pip install -e git+https://github.com/mac12m99/HtmlTestRunner.git@master#egg=html-testRunner
pip install -U parameterized
export PYTHONPATH=$PWD
export KODI_INTERACTIVE=0
export KODI_HOME=$PWD/tests/home
@@ -14,4 +14,4 @@ if (( $# >= 1 ))
then
export KOD_TST_CH=$1
fi
python3.9 tests/test_generic.py
python tests/test_generic.py

View File

@@ -12,7 +12,7 @@ import random
import sys
import time
import unittest
import datetime
import xbmc
if 'KOD_TST_CH' not in os.environ:
@@ -54,7 +54,7 @@ from core import servertools, httptools
import channelselector
import re
logger.DEBUG_ENABLED = False
httptools.HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = 10
outDir = os.path.join(os.getcwd(), 'reports')
@@ -66,7 +66,7 @@ validUrlRegex = re.compile(
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
chBlackList = ['url', 'mediasetplay', 'metalvideo', 'accuradio']
chBlackList = ['url', 'mediasetplay', 'metalvideo', 'accuradio', 'cinetecadibologna', 'tunein']
srvBlacklist = ['mega', 'hdmario', 'torrent', 'youtube']
chNumRis = {
'altadefinizione01': {
@@ -146,13 +146,14 @@ chNumRis = {
def wait():
time.sleep(random.randint(1, 3))
pass
# time.sleep(random.randint(1, 3))
servers = []
channels = []
channel_list = channelselector.filterchannels("all") if 'KOD_TST_CH' not in os.environ else [Item(channel=os.environ['KOD_TST_CH'], action="mainlist")]
logger.DEBUG_ENABLED = True
logger.info([c.channel for c in channel_list])
results = []
@@ -181,7 +182,9 @@ for chItem in channel_list:
for it in mainlist:
wait()
try:
print('preparing ' + ch + ' -> ' + it.title)
now = datetime.datetime.now()
current_time = now.strftime("%H:%M:%S")
print(current_time + ' preparing ' + ch + ' -> ' + it.title)
if it.action == 'channel_config':
hasChannelConfig = True
@@ -197,7 +200,12 @@ for chItem in channel_list:
# if more search action (ex: movie, tvshow), firstcontent need to be changed in every menu
if itemlist and itemlist[0].action in ('findvideos', 'episodios'):
firstContent = re.match('[ \w]*', itemlist[0].fulltitle).group(0)
for it2 in itemlist:
# some sites refuse to search if the search term is too short
title = it2.fulltitle if it2.contentType == 'movie' else it2.contentSerieName
if len(title) > 5:
firstContent = re.match('[ \w]*', title).group(0)
break
# some sites might have no link inside, but if all results are without servers, there's something wrong
for resIt in itemlist: