From a14c9933b2d84061fb300fc190aec896287a50c9 Mon Sep 17 00:00:00 2001
From: MaxE <29448127+EMaX0@users.noreply.github.com>
Date: Thu, 30 Jan 2020 00:47:29 +0800
Subject: [PATCH] Updated cineblog01.py (#180)
Improved cb01 and wstream: cineblog01 adds a 'Film per Paese' menu entry and shows the full quality label at the top of the findvideos results; wstream no longer fails when the id/value form field is missing from the player page.
---
 channels/cineblog01.py | 14 +++++++++++---
 servers/wstream.py     | 13 +++++++++----
 2 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/channels/cineblog01.py b/channels/cineblog01.py
index 5c57fa66..2337f951 100644
--- a/channels/cineblog01.py
+++ b/channels/cineblog01.py
@@ -35,6 +35,7 @@ def mainlist(item):
('HD', ['', 'menu', 'Film HD Streaming']),
('Generi', ['', 'menu', 'Film per Genere']),
('Anni', ['', 'menu', 'Film per Anno']),
+ ('Paese', ['', 'menu', 'Film per Paese']),
('Ultimi Aggiornati',['/lista-film-ultimi-100-film-aggiornati/', 'peliculas', 'newest']),
('Ultimi Aggiunti', ['/lista-film-ultimi-100-film-aggiunti/', 'peliculas', 'newest'])
]
@@ -208,7 +209,7 @@ def findvideos(item):
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
QualityStr = ""
for match in matches:
- QualityStr = scrapertools.decodeHtmlentities(match.group(1))[6:]
+ QualityStr = scrapertools.decodeHtmlentities(match.group(1))
# Estrae i contenuti - Streaming
load_links(itemlist, 'Streamin?g:(.*?)cbtable', "orange", "Streaming", "SD")
@@ -218,8 +219,15 @@ def findvideos(item):
# Estrae i contenuti - Streaming 3D
load_links(itemlist, 'Streamin?g 3D[^<]+(.*?)cbtable', "pink", "Streaming 3D")
-
- return support.server(item, itemlist=itemlist)
+
+ itemlist=support.server(item, itemlist=itemlist)
+ itemlist.insert(0,
+ Item(channel=item.channel,
+ action="",
+ title="[COLOR orange]%s[/COLOR]" % QualityStr,
+ folder=False))
+
+ return itemlist
# Estrae i contenuti - Download
# load_links(itemlist, 'Download:(.*?)', "aqua", "Download")
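
For readability, the added lines above amount to the following tail of findvideos(), rendered here with normal indentation. This is an illustrative sketch, not part of the patch itself; it assumes Item and support are already imported in cineblog01.py, as they are used elsewhere in the channel.

    # Sketch of the patched tail of findvideos() (assumed imports: Item, support).
    itemlist = support.server(item, itemlist=itemlist)   # resolve the playable server items
    itemlist.insert(0,
                    Item(channel=item.channel,
                         action="",                       # empty action: the row is informational only
                         title="[COLOR orange]%s[/COLOR]" % QualityStr,
                         folder=False))                   # not browsable, just a quality header
    return itemlist

The net effect is that the quality string scraped earlier in the function is shown as a non-clickable orange header above the server list, and it is no longer truncated by the old [6:] slice.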
diff --git a/servers/wstream.py b/servers/wstream.py
index f903e8b7..53bbaba1 100644
--- a/servers/wstream.py
+++ b/servers/wstream.py
@@ -21,10 +21,15 @@ def test_video_exists(page_url):
page_url = 'https://wstream.video/video.php?file_code=' + code
data = httptools.downloadpage(page_url, headers=headers, follow_redirects=True).data
- ID, code = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
- post = urllib.urlencode({ID: code})
-
- data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
+ a = scrapertools.find_single_match(data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
+ if a:
+ ID, code = a
+ post = urllib.urlencode({ID: code})
+ data = httptools.downloadpage(page_url, headers=headers, post=post, follow_redirects=True).data
+ else:
+ page_url = scrapertools.find_single_match(data, r"""
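
The wstream hunk is cut off inside the fallback pattern, so only the control flow is visible here. Read as a whole, the change stops the old unconditional tuple unpack from raising when the id/value input is not on the page: if the pair is found it is POSTed back exactly as before; otherwise a URL is taken straight from the page. The sketch below renders that flow with normal indentation; FALLBACK_PATTERN is a hypothetical placeholder for the regex truncated above, and what the patch does with the recovered page_url afterwards is outside this excerpt. In this codebase scrapertools.find_single_match returns an empty value when nothing matches, which is what makes the truthiness check work.

    # Illustrative sketch of the new control flow in wstream.py (not the literal patch).
    # FALLBACK_PATTERN is a hypothetical stand-in for the regex that is truncated above.
    a = scrapertools.find_single_match(
        data, r"""input\D*id=(?:'|")([^'"]+)(?:'|").*?value='([a-z0-9]+)""")
    if a:
        # Usual case: echo the id/value pair back as a POST, as the old code always did.
        ID, code = a
        post = urllib.urlencode({ID: code})
        data = httptools.downloadpage(page_url, headers=headers, post=post,
                                      follow_redirects=True).data
    else:
        # New fallback: the form field is missing, so pull the target URL
        # directly out of the page instead of crashing on the unpack.
        page_url = scrapertools.find_single_match(data, FALLBACK_PATTERN)
        # ...continuation not shown in this excerpt.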