'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
- if len(matches) > 0:
- scrapedurl = urlparse.urljoin(item.url, matches[0])
+ action=action,
+ contentType=item.contentType,
+ fulltitle=title,
+ show=title,
+ title=title,
+ url=url))
+
+ next_page = scrapertoolsV2.find_single_match(data, r'<a class="next page-numbers" href="([^"]+)"')
+ if next_page:
+ itemlist.append(
+ Item(channel=item.channel,
+ title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
+ url=next_page))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
-def lista_serie(item):
- logger.info("kod.serietvonline novità ")
+def peliculas(item):
+ logger.info(item.channel + 'peliculas')
itemlist = []
- p = 1
+ if item.contentType == 'movie':
+ action = 'findvideos'
+ else:
+ action = 'episodios'
+
+ page = 1
if '{}' in item.url:
- item.url, p = item.url.split('{}')
- p = int(p)
+ item.url, page = item.url.split('{}')
+ page = int(page)
data = httptools.downloadpage(item.url, headers=headers).data
+ block = scrapertoolsV2.find_single_match(data, r'id="lcp_instance_0">(.*?)<\/ul>')
+ matches = re.compile(r'<a href="([^"]+)">([^<]+)', re.DOTALL).findall(block)
- blocco = scrapertools.find_single_match(data, 'id="lcp_instance_0">(.*?)</ul>')
- patron = '<li><a href="([^"]+)">([^<]+)'
- matches = re.compile(patron, re.DOTALL).findall(blocco)
- scrapertools.printMatches(matches)
+ for i, (url, title) in enumerate(matches):
+ if (page - 1) * PERPAGE > i: continue
+ if i >= page * PERPAGE: break
+ title = scrapertoolsV2.decodeHtmlentities(title)
+ itemlist.append(
+ Item(channel=item.channel,
+ action=action,
+ title=title,
+ fulltitle=title,
+ url=url,
+ contentType=item.contentType,
+ show=title))
- for i, (scrapedurl, scrapedtitle) in enumerate(matches):
- if (p - 1) * PERPAGE > i: continue
- if i >= p * PERPAGE: break
- scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
- itemlist.append(Item(channel=item.channel,
- action="episodios",
- title=scrapedtitle,
- fulltitle=scrapedtitle,
- url=scrapedurl,
- fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
- show=item.fulltitle,
- folder=True))
-
- if len(matches) >= p * PERPAGE:
- scrapedurl = item.url + '{}' + str(p + 1)
+ if len(matches) >= page * PERPAGE:
+ url = item.url + '{}' + str(page + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
- action="lista_serie",
- title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
- url=scrapedurl,
- thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
- folder=True))
+ action="peliculas",
+ title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
+ url=url,
+ thumbnail=thumb(),
+ contentType=item.contentType))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
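
peliculas keeps its position in the listing by appending '{}' plus the page number to the URL and slicing the full match list with PERPAGE. A minimal standalone sketch of that page-marker scheme, with a made-up PERPAGE value and sample (url, title) tuples rather than real channel data:

# Sketch of the '{}' page-marker pagination used by peliculas(); values are illustrative.
PERPAGE = 2

def paginate(url, matches):
    page = 1
    if '{}' in url:
        url, page = url.split('{}')
        page = int(page)
    visible = matches[(page - 1) * PERPAGE:page * PERPAGE]
    # Offer a next-page URL only while enough results remain.
    next_url = url + '{}' + str(page + 1) if len(matches) >= page * PERPAGE else None
    return visible, next_url

print(paginate('http://example.com/serie-tv{}2', [('u1', 'A'), ('u2', 'B'), ('u3', 'C')]))
# -> ([('u3', 'C')], None)
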
def episodios(item):
- logger.info("kod.serietvonline episodios")
+ logger.info(item.channel + 'episodios')
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
- logger.info('DATA=' + data)
- blocco = scrapertools.find_single_match(data, '<table.*?>(.*?)<\/table>')
+ block = scrapertoolsV2.find_single_match(data, r'<table.*?>(.*?)<\/table>')
- patron = '<td>(.*?)</td></tr>'
- matches = re.compile(patron, re.DOTALL).findall(blocco)
- scrapertools.printMatches(matches)
+ matches = re.compile(r'<tr><td>(.*?)</td></tr>', re.DOTALL).findall(block)
- for puntata in matches:
- puntata = "| " + puntata
- # logger.debug(puntata)
- scrapedtitle = scrapertools.find_single_match(puntata, '<td>(.*?)</td>')
- scrapedtitle = scrapedtitle.replace(item.title, "")
+ for episode in matches:
+ episode = "" + episode
+ logger.info('EPISODE= ' + episode)
+ title = scrapertoolsV2.find_single_match(episode, '<td>(.*?)</td>')
+ title = title.replace(item.title, "")
+ if scrapertoolsV2.find_single_match(title, '([0-9]+x[0-9]+)'):
+ title = scrapertoolsV2.find_single_match(title, '([0-9]+x[0-9]+)') + ' - ' + re.sub('([0-9]+x[0-9]+)',' -',title)
+ elif scrapertoolsV2.find_single_match(title, ' ([0-9][0-9])') and not scrapertoolsV2.find_single_match(title, ' ([0-9][0-9][0-9])'):
+ title = '1x' + scrapertoolsV2.find_single_match(title, ' ([0-9]+)') + ' - ' + re.sub(' ([0-9]+)',' -',title)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
- fulltitle=scrapedtitle,
- show=scrapedtitle,
- title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
- url=puntata,
- thumbnail=item.scrapedthumbnail,
- plot=item.scrapedplot,
+ fulltitle=title,
+ show=title,
+ title=title,
+ url=episode,
folder=True))
+
+ if config.get_videolibrary_support() and len(itemlist) > 0:
+ itemlist.append(
+ Item(channel=item.channel, title='[COLOR blue][B]'+config.get_localized_string(30161)+'[/B][/COLOR]', url=item.url,
+ action="add_serie_to_library", extra="episodios", show=item.show))
+
return itemlist
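
The two regex branches in episodios() normalize scraped labels to an 'NxM - name' form: an existing '2x05' marker is kept, while a bare two-digit episode number is promoted to season 1. A self-contained mirror of that logic, run on invented titles:

import re

def normalize_title(title):
    # Same branching as episodios(): keep an existing SxE marker, else assume season 1.
    if re.search(r'[0-9]+x[0-9]+', title):
        return re.search(r'([0-9]+x[0-9]+)', title).group(1) + ' - ' + re.sub(r'([0-9]+x[0-9]+)', ' -', title)
    if re.search(r' ([0-9][0-9])', title) and not re.search(r' ([0-9][0-9][0-9])', title):
        return '1x' + re.search(r' ([0-9]+)', title).group(1) + ' - ' + re.sub(r' ([0-9]+)', ' -', title)
    return title

print(normalize_title('Serie Demo 2x05 Il ritorno'))  # 2x05 - Serie Demo  - Il ritorno
print(normalize_title('Serie Demo 05'))               # 1x05 - Serie Demo -
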
def findvideos(item):
- logger.info("kod.serietvonline findvideos")
- itemlist = []
+ logger.info(item.channel + 'findvideos')
+ itemlist=[]
+ logger.info('TYPE= ' + item.contentType)
+ if item.contentType == 'movie':
+ data = httptools.downloadpage(item.url, headers=headers).data
+ logger.info('DATA= ' + data)
+ item.url = scrapertoolsV2.find_single_match(data, r'<table.*?>(.*?)<\/table>')
- patron = "]+>[^>]+>([^<]+)<\/a>"
- matches = re.compile(patron, re.DOTALL).findall(item.url)
-
- for scrapedurl, scrapedserver in matches:
+ urls = scrapertoolsV2.find_multiple_matches(item.url, r"<a href='([^']+)'>.*?>.*?([a-zA-Z]+).*?<\/a>")
+
+ for url, server in urls:
itemlist.append(
Item(channel=item.channel,
- action="play",
- fulltitle=item.scrapedtitle,
- show=item.scrapedtitle,
- title="[COLOR blue]" + item.title + "[/COLOR][COLOR orange]" + scrapedserver + "[/COLOR]",
- url=scrapedurl,
- thumbnail=item.scrapedthumbnail,
- plot=item.scrapedplot,
- folder=True))
+ action='play',
+ title=item.title + ' [COLOR blue][' + server + '][/COLOR]',
+ server=server,
+ url=url))
+
+ autoplay.start(itemlist, item)
+
+ if item.contentType != 'episode':
+ if config.get_videolibrary_support() and len(itemlist) > 0:
+ itemlist.append(
+ Item(channel=item.channel, title='[COLOR blue][B]'+config.get_localized_string(30161)+'[/B][/COLOR]', url=item.url,
+ action="add_pelicula_to_library", extra="findvideos", contentTitle=item.fulltitle))
return itemlist
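
findvideos() pulls the (url, server) pairs straight out of the stored table-row markup. A quick illustration on an invented snippet; the sample HTML and regex below are assumptions about the shape of the data, not the site's actual markup:

import re

row = ("<td><a href='https://host.example/abc' target='_blank'>"
       "<img src='play.png'> supervideo</a></td>")

pairs = re.findall(r"<a href='([^']+)'.*?>.*?([a-zA-Z]+)\s*</a>", row, re.DOTALL)
print(pairs)  # [('https://host.example/abc', 'supervideo')]
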
def play(item):
- data = item.url
- data, c = unshortenit.unshorten(data)
+ data, c = unshortenit.unshorten(item.url)
itemlist = servertools.find_video_items(data=data)
@@ -197,10 +232,3 @@ def play(item):
return itemlist
-thumbnail_fanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
-ThumbnailHome = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/81/Dynamic-blue-up.svg/580px-Dynamic-blue-up.svg.png"
-thumbnail_novita = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
-thumbnail_lista = "http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"
-thumbnail_top = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
-thumbnail_cerca = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
-thumbnail_successivo = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"