@@ -35,6 +35,7 @@ def mainlist(item):
        ('HD', ['', 'menu', 'Film HD Streaming']),
        ('Generi', ['', 'menu', 'Film per Genere']),
        ('Anni', ['', 'menu', 'Film per Anno']),
        ('Paese', ['', 'menu', 'Film per Paese']),
        ('Ultimi Aggiornati', ['/lista-film-ultimi-100-film-aggiornati/', 'peliculas', 'newest']),
        ('Ultimi Aggiunti', ['/lista-film-ultimi-100-film-aggiunti/', 'peliculas', 'newest'])
    ]
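The menu above is a list of (title, [url, action, argument]) tuples. The following is a minimal, self-contained sketch of how such tuples could be expanded into selectable menu entries; the build_menu helper, the host value, the channel name and the dict-based entries are illustrative stand-ins, not the channel's actual code.

# Illustrative sketch only (not the channel's real mainlist): expand
# (title, [url, action, extra]) tuples into plain menu entries.

host = 'https://example-film-site.example'   # hypothetical base URL

MENU = [
    ('HD', ['', 'menu', 'Film HD Streaming']),
    ('Ultimi Aggiunti', ['/lista-film-ultimi-100-film-aggiunti/', 'peliculas', 'newest']),
]

def build_menu(channel='filmchannel'):
    itemlist = []
    for title, (path, action, extra) in MENU:
        itemlist.append({
            'channel': channel,
            'title': title,
            'url': host + path,   # an empty path points at the site root
            'action': action,     # 'menu' or 'peliculas' in the diff above
            'extra': extra,
        })
    return itemlist

if __name__ == '__main__':
    for entry in build_menu():
        print(entry['title'], '->', entry['action'], entry['url'])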
@@ -208,7 +209,7 @@ def findvideos(item):
     matches = re.compile(patronvideos, re.DOTALL).finditer(data)
     QualityStr = ""
     for match in matches:
-        QualityStr = scrapertools.decodeHtmlentities(match.group(1))[6:]
+        QualityStr = scrapertools.decodeHtmlentities(match.group(1))
 
     # Estrae i contenuti - Streaming
     load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "orange", "Streaming", "SD")
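The hunk above changes how QualityStr is built: the decoded match is now kept whole instead of dropping its first six characters. Below is a self-contained sketch of that extraction step, with html.unescape standing in for scrapertools.decodeHtmlentities and an invented page fragment and pattern.

# Sketch of the QualityStr extraction above. The HTML snippet and the
# pattern are invented for illustration; html.unescape stands in for
# scrapertools.decodeHtmlentities.
import re
from html import unescape

data = '<div id="film-info">Qualit&agrave;: DVDRip &ndash; 720p</div>'   # hypothetical fragment
patronvideos = r'<div id="film-info">(.*?)</div>'                        # hypothetical pattern

matches = re.compile(patronvideos, re.DOTALL).finditer(data)

QualityStr = ""
for match in matches:
    # New behaviour in the diff: keep the whole decoded string
    # (the old code sliced off the first six characters with [6:]).
    QualityStr = unescape(match.group(1))

print(QualityStr)   # -> Qualità: DVDRip – 720p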
@@ -218,8 +219,15 @@ def findvideos(item):
 
     # Estrae i contenuti - Streaming 3D
     load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "pink", "Streaming 3D")
 
-    return support.server(item, itemlist=itemlist)
-
+    itemlist=support.server(item, itemlist=itemlist)
+    itemlist.insert(0,
+                    Item(channel=item.channel,
+                         action="",
+                         title="[COLOR orange]%s[/COLOR]" % QualityStr,
+                         folder=False))
+
+    return itemlist
+
     # Estrae i contenuti - Download
     # load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")
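Instead of returning support.server() directly, the new code keeps the server list, inserts a non-playable label carrying the scraped quality string at position 0, and returns the list. Below is a rough, runnable sketch of that pattern; the tiny Item class and the sample titles are stand-ins for the addon's real Item objects.

# Sketch of the pattern introduced above: after the server list is built,
# a non-playable header showing the scraped quality is inserted at the top.

class Item(object):
    def __init__(self, channel='', action='', title='', folder=True):
        self.channel, self.action, self.title, self.folder = channel, action, title, folder

def add_quality_header(itemlist, quality, channel='filmchannel'):
    # action="" and folder=False make the entry informational only.
    itemlist.insert(0, Item(channel=channel,
                            action="",
                            title="[COLOR orange]%s[/COLOR]" % quality,
                            folder=False))
    return itemlist

servers = [Item(action='play', title='Server 1'), Item(action='play', title='Server 2')]
for it in add_quality_header(servers, 'DVDRip 720p'):
    print(it.title)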
@@ -21,10 +21,15 @@ def test_video_exists(page_url):
     page_url = 'https://wstream.video/video.php?file_code=' + code
     data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
 
-    ID, code = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
-    post = urllib.urlencode({ID: code})
-
-    data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
+    a = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
+    if a:
+        ID, code = a
+        post = urllib.urlencode({ID: code})
+        data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
+    else:
+        page_url = scrapertools.find_single_match(data, r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
+        if page_url:
+            data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
 
     if "Not Found" in data or "File was deleted" in data:
        return False, config.get_localized_string(70449) % 'Wstream'
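The reworked connector code above first looks for a hidden input field and re-submits its id/value pair to the same URL; if no field is present, it falls back to the link marked title='bkg'. Below is a logic-only sketch of that flow: fetch() is a stub standing in for httptools.downloadpage(...).data, find_single_match() mimics scrapertools.find_single_match, and SAMPLE_PAGE is invented.

# Logic-only sketch of the flow above; no real HTTP traffic is made.
import re
from urllib.parse import urlencode   # Python 3 counterpart of urllib.urlencode

SAMPLE_PAGE = """<input type="hidden" id='op' name='op' value='abc123'>"""

def find_single_match(data, pattern):
    # Returns '' when nothing matches, group(1) for one group, a tuple otherwise.
    m = re.search(pattern, data, re.DOTALL)
    if not m:
        return ''
    return m.groups() if len(m.groups()) > 1 else m.group(1)

def fetch(url, post=None):
    # Pretend downloader: a real connector would do an HTTP GET/POST here.
    print('POST' if post else 'GET', url, post or '')
    return SAMPLE_PAGE

def resolve(page_url):
    data = fetch(page_url)
    a = find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
    if a:
        # Hidden form field found: re-submit it to the same URL.
        ID, code = a
        data = fetch(page_url, post=urlencode({ID: code}))
    else:
        # No form field: fall back to the redirect link marked title='bkg'.
        page_url = find_single_match(data, r"""<center><a href='(https?:\/\/wstream[^']+)'\s*title='bkg'""")
        if page_url:
            data = fetch(page_url)
    return data

resolve('https://wstream.video/video.php?file_code=XYZ')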