1346 Commits

Author SHA1 Message Date
github-actions[bot]
32ad55bfa0 Aggiornamento domini 2024-09-07 17:33:38 +00:00
github-actions[bot]
b95cd9d922 Aggiornamento domini 2024-09-03 17:37:17 +00:00
github-actions[bot]
6311cbdc81 Aggiornamento domini 2024-08-26 17:36:16 +00:00
ilmich
ac946d4fa9 cineblog: fix scraper 2024-08-24 17:07:05 +02:00
github-actions[bot]
24e47dc26a Aggiornamento domini 2024-08-19 17:37:48 +00:00
github-actions[bot]
b656954f6d Aggiornamento domini 2024-08-11 17:37:26 +00:00
ilmich
6df7c6265e updated scws 2024-07-30 19:44:42 +02:00
ilmich
8b4f0e0051 Aggiornamento domini 2024-07-25 17:03:16 +02:00
ilmich
fc74cc35c4 toonitalia: fix scraper and voe server 2024-07-25 17:02:49 +02:00
ilmich
cc0f047c28 channels: fix raiplay e filmpertutti 2024-07-25 17:02:37 +02:00
ilmich
4abd669d83 filmpertutti: fix channel 2024-07-25 17:02:25 +02:00
github-actions[bot]
e652ae021c Aggiornamento domini 2024-07-24 17:36:58 +00:00
github-actions[bot]
48ce21b476 Aggiornamento domini 2024-07-20 17:34:30 +00:00
github-actions[bot]
882e025ad0 Aggiornamento domini 2024-07-19 17:41:40 +00:00
github-actions[bot]
0a6b321219 Aggiornamento domini 2024-07-16 17:34:10 +00:00
github-actions[bot]
e6b0478301 Aggiornamento domini 2024-07-12 17:37:34 +00:00
ilmich
e66c068a24 dinostreaming: updated with some categories 2024-07-11 19:45:11 +02:00
ilmich
3fb69cafa0 dinostreaming: new channel (work in progress) 2024-07-11 19:45:02 +02:00
ilmich
0961e41023 altadefinizione: channel refactored 2024-07-11 19:44:49 +02:00
ilmich
127ac02567 altadefinizione: fix resume on some tv series 2024-07-11 19:44:37 +02:00
ilmich
9c2419146a altadefinizione: fix for not working stream 2024-07-11 19:44:21 +02:00
ilmich
ecef4f696f altadefinizione: fix player resume 2024-07-11 19:44:11 +02:00
ilmich
6dab1b1da6 altadefinizione: fix for speed 2024-07-11 19:44:00 +02:00
ilmich
616d5942d0 altadefiniziona: fix scraper 2024-07-11 19:43:49 +02:00
github-actions[bot]
8ec29c52c8 Aggiornamento domini 2024-07-11 17:35:27 +00:00
ilmich
0532815ef2 altadefinizione: fixed episode scraper 2024-06-24 07:15:35 +02:00
github-actions[bot]
b1ce721e2e Aggiornamento domini 2024-06-17 17:34:18 +00:00
ilmich
71475f82c8 vvvvid: disabled 2024-06-16 21:18:33 +02:00
ilmich
3332c6d0bc discoveryplus: fix live channels 2024-06-16 21:18:33 +02:00
github-actions[bot]
626f1c35ad Aggiornamento domini 2024-05-31 17:37:34 +00:00
github-actions[bot]
e559bb87b5 Aggiornamento domini 2024-05-27 17:36:28 +00:00
Timmy89
a84af18a37 Aggiornamento domini 2024-05-26 22:47:59 +02:00
ilmich
d205e37ea4 altadefinizione: fix scraper 2024-05-25 18:29:59 +02:00
marco
b6a46b7171 altadefinizione: episodios con scrape 2024-05-25 18:29:41 +02:00
ilmich
7cd271e4d3 downloads: fix when autoplay is enabled 2024-05-25 18:29:25 +02:00
ilmich
7ab99f0bb7 support: set movie icon for saghe 2024-05-25 18:29:11 +02:00
ilmich
69d863cb2a altadefinizione: fix channel 2024-05-25 18:28:53 +02:00
ilmich
a6da6f7fe7 Fix SWCS 2024-05-23 20:12:19 +02:00
ilmich
ca3e7ec273 Fix SCWS 2024-05-23 20:12:09 +02:00
Alhaziel01
d950781310 Fix AnimeUnity e SCWS 2024-05-23 20:11:52 +02:00
ilmich
67106bd607 hd4me: fix channel 2024-05-11 07:51:46 +02:00
github-actions[bot]
e46e618890 Aggiornamento domini 2024-05-10 17:36:39 +00:00
github-actions[bot]
bfb0e9c292 Aggiornamento domini 2024-05-06 17:34:39 +00:00
ilmich
a683419209 mediasetplay: revised channel menu 2024-04-26 08:08:23 +02:00
ilmich
ac36b79de6 mediasetplay: fix channel 2024-04-24 20:11:20 +02:00
ilmich
1f545470ff supervideo: fix url 2024-04-24 17:50:43 +02:00
ilmich
e9880b4549 altadefinizione01: channel fix 2024-04-24 17:49:46 +02:00
ilmich
e531b7a8de voe: fix media url 2024-04-24 17:49:29 +02:00
github-actions[bot]
0b467d47a3 Aggiornamento domini 2024-04-22 17:34:06 +00:00
ilmich
1e5c2e0874 raiplay: fix drm contents 2024-04-18 21:11:27 +02:00
Timmy89
19604696ce Disabilitato filmstreaming 2024-04-16 08:58:19 +02:00
github-actions[bot]
aeafbe2dae Aggiornamento domini 2024-04-11 17:34:34 +00:00
ilmich
ed7df80ea3 fix domain 2024-04-06 17:47:49 +02:00
ilmich
f4259fcb5f updated servers 2024-04-06 17:47:37 +02:00
ilmich
a9ac004dc4 Merge remote-tracking branch 'origin/stable' into stable 2024-04-06 17:42:50 +02:00
ilmich
575827e624 discoveryplus: fix for some programs 2024-04-06 17:42:04 +02:00
github-actions[bot]
0ab1f9e309 Aggiornamento domini 2024-03-22 17:34:35 +00:00
mac12m99
3f40cc8414 Update maxstream.json 2024-03-22 07:12:22 +01:00
marco
7f562fcf5d Rimosso anonfiles dai provider per i file di log 2024-03-21 19:41:53 +01:00
marco
563c2cbda5 Fix 2024-03-21 19:41:13 +01:00
mac12m99
2e5ddc1c54 disattivato maxstream 2024-03-21 17:30:46 +01:00
ilmich
b7834c044b added lulustream server 2024-03-20 18:30:04 +01:00
marco
e26664b480 Header di httptools al lettore di kodi 2024-03-20 18:29:48 +01:00
marco
951b3f517c Nuovi proxy CF 2024-03-18 20:16:18 +01:00
github-actions[bot]
fe357ccc75 Aggiornamento domini 2024-03-18 17:36:14 +00:00
ilmich
c3c944e22f toonitalia: fix channel 2024-03-17 19:47:43 +01:00
github-actions[bot]
690d35bd03 Aggiornamento domini 2024-03-16 17:34:14 +00:00
TheHacker66
e2e5486553 Fix maxstream & uprot.net (#482) 2024-03-16 11:22:49 +01:00
ilmich
b722eda625 mixdrop: fix redirection 2024-03-15 18:43:16 +01:00
ilmich
f848574206 resolverdns: added select dns provider option 2024-03-13 20:13:08 +01:00
ilmich
481059407f resolverdns: better exception handling (#472)
* resolverdns: better exception handling
2024-03-13 20:12:44 +01:00
ilmich
ab60a3b38c platformcode: fix some warning and populate also manifest_headers 2024-03-13 19:52:20 +01:00
ilmich
9fc8fabeef italiafilm: fix year for tmdb 2024-03-13 19:43:44 +01:00
ilmich
e957dd2c72 italiafilm: fix channel search 2024-03-09 19:31:44 +01:00
ilmich
835edf0246 fix eurostreaming channel and small updates 2024-03-09 14:33:02 +01:00
github-actions[bot]
3f224cb697 Aggiornamento domini 2024-03-07 17:34:54 +00:00
ilmich
51068183ca fix mixdrop and supervideo 2024-03-06 19:49:40 +01:00
Timmy89
851feff444 Aggiornamento domini 2024-03-06 19:48:43 +01:00
github-actions[bot]
a12def67f7 Aggiornamento domini 2024-03-06 17:34:25 +00:00
Timmy89
cf85cf9193 Fix trakt 2024-03-04 19:35:33 +01:00
ilmich
c788b161e0 eurostreaming: fix scraper and re-enable channel 2024-03-04 17:58:27 +01:00
ilmich
89cf4a19f6 unshortenit: fix clicca.cc and added safego.cc 2024-03-04 17:58:01 +01:00
ilmich
ad246fb063 mixdrop: include another path 2024-03-04 17:57:33 +01:00
ilmich
f459b1b7e9 altadefizione: fix url redirection 2024-03-04 17:26:30 +01:00
github-actions[bot]
622c6c0f3c Aggiornamento domini 2024-03-03 17:34:34 +00:00
Timmy89
0bcc0a260b Aggiornamento domini 2024-03-03 15:43:53 +01:00
github-actions[bot]
c96ec2f084 Aggiornamento domini 2024-02-29 17:34:36 +00:00
ilmich
f2e85c9ae8 mediasetplay: fixed TV series episode list 2024-02-22 20:28:33 +01:00
github-actions[bot]
18217ea1e9 Aggiornamento domini 2024-02-20 17:35:45 +00:00
Timmy89
1fd0da39f6 Aggiornamento domini 2024-02-20 17:40:09 +01:00
Timmy89
e483c1059e Aggiornamento domini 2024-02-20 16:38:05 +01:00
marco
f684cccd04 Fix permessi 2024-02-19 20:30:35 +01:00
Timmy89
c0c280fb18 Merge pull request #470 from python2-kod/stable
correzioni per kodi su raspberry pi
2024-02-18 03:17:50 +01:00
python2-kod
067c481749 clean 2024-02-17 07:28:50 +01:00
python2-kod
a17bca7bf8 fix python2 2024-02-17 07:11:04 +01:00
ilmich
f4bc2c98a8 httptools: resolverdns refactored (#466) 2024-02-16 11:05:27 +01:00
ilmich
38808bb584 altadefinizione: added saga section (#467) 2024-02-12 20:07:40 +01:00
Timmy89
915b9e3e7b fix doodstream 2024-01-31 19:42:25 +01:00
Timmy89
4519aff817 cloudscraper: disable tls1.2 2024-01-31 19:41:25 +01:00
ilmich
9c02f1a890 raiplay: switch to inputstream adaptive (#463) 2024-01-29 20:26:52 +01:00
ilmich
acfd6f628f fix useragent (#459) 2024-01-29 20:26:50 +01:00
ilmich
b5baf3ad0f Update request urllib3 (#464)
* requests: updated to version 2.27.1

* urllib3: updated to version 1.26.18
2024-01-29 20:26:48 +01:00
ilmich
9a75c0ff9f mixdrop: add another domain (#458) 2024-01-22 19:23:26 +01:00
Timmy89
f2508b76d8 Disattivato casacinema 2024-01-20 09:36:34 +01:00
Timmy89
fa05e41386 Disattivato cinemalibero 2024-01-20 09:27:51 +01:00
Michele Zuccalà
603b3c39ba altadefinizione: fix title, lang and others (#456) 2024-01-18 19:55:39 +01:00
Michele Zuccalà
b0c2ac7b94 service: fix ua updater (#457) 2024-01-18 19:55:36 +01:00
Timmy89
6eb71481f4 Aggiornata versione ua 2024-01-16 21:02:13 +01:00
github-actions[bot]
3d5c100de9 Aggiornamento domini 2024-01-16 17:35:46 +00:00
Michele Zuccalà
2d6550ea69 platformtools: fix user agent for hls playlist and hls key (#454) 2024-01-13 15:20:43 +01:00
github-actions[bot]
f4bacd0c2f Aggiornamento domini 2024-01-10 17:34:49 +00:00
Timmy89
85e3030f18 Aggiornamento domini 2024-01-10 03:33:48 +01:00
github-actions[bot]
ebad6e2c77 Aggiornamento domini 2024-01-09 17:34:41 +00:00
github-actions[bot]
0b8c32e08f Aggiornamento domini 2024-01-08 17:34:37 +00:00
github-actions[bot]
65702ed534 Aggiornamento domini 2024-01-06 17:34:26 +00:00
Timmy89
aba19de698 Aggiornamento domini 2024-01-06 17:15:37 +01:00
Michele Zuccalà
35e051c4e3 fix altadefinizione (#452) 2024-01-05 12:09:20 +01:00
Michele Zuccalà
4d7b86f1f4 optimize html_uniform (#449) 2024-01-05 12:09:17 +01:00
github-actions[bot]
daad0c3555 Aggiornamento domini 2023-12-15 17:34:22 +00:00
github-actions[bot]
e816e766c2 Aggiornamento domini 2023-12-01 17:34:29 +00:00
marco
912ccf2372 Fix animeunity 2023-11-25 19:19:03 +01:00
marco
4ac48abbff Fix audio SC 2023-11-23 20:08:03 +01:00
Michele Zuccalà
7287780c65 Fix altadefinizione01 (#445) 2023-11-17 20:35:59 +01:00
github-actions[bot]
b19d5bed52 Aggiornamento domini 2023-11-17 17:34:19 +00:00
Michele Zuccalà
3e94e87607 Add another mixdrop domain regexp (#443) 2023-11-14 20:45:33 +01:00
Timmy89
f8c8b2ba95 Fix StreamingCommunity, con il gentile contributo di @ilmich80 2023-11-08 19:44:28 +01:00
Timmy89
ea351c63f1 Aggiornamento dominio Mixdrop 2023-11-03 21:23:20 +01:00
github-actions[bot]
a82ee1c493 Aggiornamento domini 2023-10-26 17:34:20 +00:00
github-actions[bot]
799dfe2a1b Aggiornamento domini 2023-10-25 17:35:43 +00:00
marco
606bfe4618 Fix altadefinizione, film in videoteca 2023-10-24 19:46:58 +02:00
github-actions[bot]
6e146f7a75 Aggiornamento domini 2023-10-23 17:35:48 +00:00
marco
4b11365063 Fix altadefinizione (tranne sez. agg.), aggiunto wolfstream 2023-10-22 15:02:20 +02:00
marco
a6a29df7e3 Fix parziale altadefinizione, corretto catastrophic backtracking su guardaseriecam (in alcuni casi) 2023-10-22 15:02:15 +02:00
github-actions[bot]
2a49648b2a Aggiornamento domini 2023-10-20 17:35:50 +00:00
github-actions[bot]
8a244d0465 Aggiornamento domini 2023-10-19 17:34:21 +00:00
github-actions[bot]
1484812b86 Aggiornamento domini 2023-10-15 17:34:12 +00:00
github-actions[bot]
2c185a646e Aggiornamento domini 2023-10-02 17:34:15 +00:00
github-actions[bot]
b3ef7314db Aggiornamento domini 2023-09-29 17:34:25 +00:00
github-actions[bot]
9d2f4e746e Aggiornamento domini 2023-09-28 17:34:06 +00:00
Timmy89
ac563d1598 Riabilitato Streamingita 2023-09-27 20:59:19 +02:00
Timmy89
73f227aba8 Aggiornamento domini 2023-09-27 20:58:36 +02:00
Timmy89
57fe901001 Fix Guardaseriecam
Questo fix corregge il rallentamento della ricerca globale come per esempio la ricerca della serie Joe Pickett.
2023-09-27 16:05:18 +02:00
github-actions[bot]
ec61833829 Aggiornamento domini 2023-09-14 17:34:39 +00:00
github-actions[bot]
e89c7eaffb Aggiornamento domini 2023-09-09 17:34:06 +00:00
github-actions[bot]
93a9e989ce Aggiornamento domini 2023-09-07 17:34:31 +00:00
Timmy89
c5fd860bb4 Disattivato StreamingITA 2023-08-21 01:45:23 +02:00
Timmy89
611b387bb8 CB01 aggiunta sezione film Italiani 2023-08-21 00:38:22 +02:00
github-actions[bot]
78c4ce8518 Aggiornamento domini 2023-08-15 17:32:13 +00:00
marco
353281d17f decoratore scrape, aggiunto parametro flags e flagsBlock per modifica flag regex 2023-08-10 13:25:56 +02:00
github-actions[bot]
5237668d17 Aggiornamento domini 2023-07-28 17:32:22 +00:00
marco
dadab39c11 Fix ilcorsaronero 2023-07-28 11:13:51 +02:00
github-actions[bot]
f1a27f4b27 Aggiornamento domini 2023-07-21 17:38:08 +00:00
github-actions[bot]
da6739cd83 Aggiornamento domini 2023-07-17 17:37:37 +00:00
marco
d4d9ddc336 Fix menu 1337x 2023-07-17 12:51:29 +02:00
marco
32e0dbf525 StreamSB nuovi domini 2023-07-17 12:15:48 +02:00
Pasquale Pizzuti
4355c3d8bb Aggiornamento domini (#430) 2023-07-17 12:15:46 +02:00
github-actions[bot]
b25b127d89 Aggiornamento domini 2023-07-14 17:36:55 +00:00
Timmy89
9aebc049c8 Dipendenze opzionali 2023-07-13 23:32:20 +02:00
github-actions[bot]
ba607356d2 Aggiornamento domini 2023-07-13 17:39:16 +00:00
github-actions[bot]
271b6b3d1f Aggiornamento domini 2023-07-08 17:41:14 +00:00
github-actions[bot]
4dce6f2d5f Aggiornamento domini 2023-07-07 17:41:59 +00:00
github-actions[bot]
0bb9c62a7d Aggiornamento domini 2023-07-06 17:43:54 +00:00
github-actions[bot]
817092fefc Aggiornamento domini 2023-07-05 17:38:06 +00:00
github-actions[bot]
e5f656616f Aggiornamento domini 2023-07-01 17:40:59 +00:00
marco
d29efd4ec2 KoD 1.7.7
- fix di routine ai canali/server
2023-06-30 19:39:03 +02:00
Timmy89
c3e02636fb Disattivato AnimeForce 2023-06-26 19:39:38 +02:00
github-actions[bot]
abb01d6bf3 Aggiornamento domini 2023-06-22 17:36:38 +00:00
github-actions[bot]
786e8bf2c2 Aggiornamento domini 2023-06-20 17:40:46 +00:00
Timmy89
8fe0df3c16 CB01 Film popolari, Film popolari per genere. 2023-06-19 23:28:30 +02:00
Timmy89
d80d3c9f26 Disattivato Animealtadefinizione 2023-06-19 23:20:43 +02:00
github-actions[bot]
943de295e8 Aggiornamento domini 2023-06-17 17:36:57 +00:00
github-actions[bot]
adb83f18f8 Aggiornamento domini 2023-06-09 17:36:48 +00:00
Timmy89
141aefca83 Riattivato AniPlay 2023-06-09 12:51:54 +02:00
marco
5b607e379a Fix streamingcommunity 2023-05-30 20:48:49 +02:00
marco
66ae64c562 Fix supervideo 2023-05-17 20:57:20 +02:00
marco
f470c49c7b Fix ilcorsaronero 2023-05-17 20:48:12 +02:00
github-actions[bot]
12b3afcba7 Aggiornamento domini 2023-05-13 17:32:30 +00:00
Timmy89
630fa1ade0 Aggiornamento domini 2023-05-13 18:58:01 +02:00
github-actions[bot]
24cc1638b7 Aggiornamento domini 2023-05-11 17:32:23 +00:00
Timmy89
ce4533d7c3 Aggiornamento domini 2023-05-10 02:31:46 +02:00
github-actions[bot]
2415307a8a Aggiornamento domini 2023-05-06 17:32:24 +00:00
Timmy89
f413324021 Aggiornamento domini 2023-05-06 17:07:52 +02:00
github-actions[bot]
8fde65126f Aggiornamento domini 2023-05-04 17:32:31 +00:00
github-actions[bot]
b0adfc0cb3 Aggiornamento domini 2023-05-01 17:35:39 +00:00
Timmy89
8a25db2c2a Aggiornamento domini 2023-05-01 16:28:33 +02:00
marco
571513311d Fix streamSB 2023-04-29 17:19:58 +02:00
marco
09ab2ca5db Fix stagioni mancanti filmpertutti 2023-04-26 22:00:16 +02:00
marco
c866339a68 Fix sottosezioni CB01 2023-04-26 22:00:13 +02:00
marco
a31ab4dc33 Fix download da menu contestuale con canali singolo server 2023-04-26 22:00:11 +02:00
github-actions[bot]
8ee269ce8d Aggiornamento domini 2023-04-23 17:32:41 +00:00
Timmy89
0f1ac46df4 Aggiornamento domini 2023-04-23 13:05:59 +02:00
github-actions[bot]
d291f1b463 Aggiornamento domini 2023-04-21 17:32:37 +00:00
Timmy89
18462bd1ca Aggiornamento domini 2023-04-21 12:39:11 +02:00
marco
d4e5089ec2 Refresh all'abilitazione del download 2023-04-19 20:38:46 +02:00
marco
59ebb49d6a Prova streamtape 2023-04-19 20:38:41 +02:00
marco
ae162428ce Downloader: sistemata finestra selezione, fix riproduzione da sezione videoteca e ottimizzazione 2023-04-19 20:38:04 +02:00
Timmy89
6e5bfecd6a ItaliaFilm aggiunta sezione Film 2023-04-10 20:37:18 +02:00
github-actions[bot]
5e9d080398 Aggiornamento domini 2023-04-10 17:32:18 +00:00
Timmy89
06bad7a844 Aggiornamento domini 2023-04-10 04:15:52 +02:00
syscallkill
d1759d1fe6 add italiafilm channel (#414) 2023-04-08 15:59:03 +02:00
Timmy89
292cc61786 Piccolo aggiustamento CB01 2023-04-02 15:34:10 +02:00
Timmy89
93efe07593 Fix novità CB01 2023-04-02 13:30:40 +02:00
Timmy89
556277e202 Ripristinata sezione ultimi film aggiunti CB01 2023-04-01 22:29:30 +02:00
github-actions[bot]
6b2aa274de Aggiornamento domini 2023-03-31 17:32:35 +00:00
github-actions[bot]
76752e3c40 Aggiornamento domini 2023-03-27 17:33:06 +00:00
Timmy89
bce2c1b0f0 Aggiornamento domini 2023-03-26 22:54:24 +02:00
marco
fe2ee713ec Fix CF 2023-03-25 10:36:29 +01:00
marco
3e0e7f535d Fix YT, sezione serietv raiplay 2023-03-25 10:36:26 +01:00
github-actions[bot]
d8743fc956 Aggiornamento domini 2023-03-24 17:32:31 +00:00
github-actions[bot]
f675c8d79d Aggiornamento domini 2023-03-23 17:32:53 +00:00
github-actions[bot]
4ebe273511 Aggiornamento domini 2023-03-21 17:32:33 +00:00
Timmy89
d822e9b1b4 Ops 2023-03-21 13:13:59 +01:00
Timmy89
539faa4011 Aggiornamento domini 2023-03-21 12:45:42 +01:00
marco
c77d115448 ops 2023-03-20 20:59:04 +01:00
marco
fd9c09a32b Fix goodstream 2023-03-20 20:30:16 +01:00
marco
082fec00aa Fix Altadefinizione, casacinema e filmpertutti, streamsb. Nuovo dominio toonitalia.xyz (non ha sottosezioni) 2023-03-20 20:30:13 +01:00
github-actions[bot]
2acaf18282 Aggiornamento domini 2023-03-17 17:32:27 +00:00
Timmy89
aaad4172ef Aggiornamento domini 2023-03-17 09:33:26 +01:00
Timmy89
f40dbef187 Aggiornamento domini 2023-03-17 09:28:56 +01:00
github-actions[bot]
630ca002e2 Aggiornamento domini 2023-03-15 17:34:03 +00:00
Timmy89
0a3e71b0a9 Aggiornamento domini 2023-03-15 18:06:38 +01:00
marco
7716a5faf9 CB01: rimosse sezioni non più esistenti 2023-03-13 21:15:54 +01:00
github-actions[bot]
bf91fd4625 Aggiornamento domini 2023-03-07 17:34:04 +00:00
github-actions[bot]
989f9aa8e8 Aggiornamento domini 2023-03-03 17:32:40 +00:00
marco
48f0295dde Disattivati toonitalia e aniplay 2023-03-02 21:06:03 +01:00
marco
35304d8bda Fix VVVVID, streamsb e nuovo server filemoon. Miglior passaggio info a trakt (se attivo) 2023-03-02 20:48:26 +01:00
github-actions[bot]
537f0f5f68 Aggiornamento domini 2023-03-01 17:35:23 +00:00
Timmy89
130a47018f Aggiornamento domini 2023-02-28 21:13:25 +01:00
github-actions[bot]
ff6b5bd884 Aggiornamento domini 2023-02-28 17:34:23 +00:00
marco
bee16c1f0d Fix casacinema 2023-02-20 20:08:15 +01:00
github-actions[bot]
5c8432f566 Aggiornamento domini 2023-02-17 17:32:41 +00:00
marco
8da5e8b3e3 Fix casacinema 2023-02-16 20:21:05 +01:00
Timmy89
2048cabb37 Fix SC 2023-02-16 19:18:50 +01:00
github-actions[bot]
5bd067ad20 Aggiornamento domini 2023-02-16 17:32:31 +00:00
github-actions[bot]
8007a35365 Aggiornamento domini 2023-02-12 17:32:28 +00:00
github-actions[bot]
a3a537f952 Aggiornamento domini 2023-02-11 17:32:33 +00:00
marco
e3ab536339 Fix StreamSB 2023-02-11 13:39:31 +01:00
marco
5fe60b7b83 tempdir per js2py 2023-02-07 20:31:05 +01:00
github-actions[bot]
b497782630 Aggiornamento domini 2023-02-01 17:32:30 +00:00
Timmy89
0890815382 Fix SC 2023-02-01 16:46:02 +01:00
marco
a6074d4ba1 Fix vvvvid, rimosso animeuniverse(chiuso) 2023-01-28 11:21:22 +01:00
marco
cd8bbabd74 Fix vvvvid 2023-01-24 20:36:53 +01:00
marco
1ce00b452f Fix doodstream e opzioni di KoD (kodi 20) 2023-01-23 21:47:15 +01:00
github-actions[bot]
3c337f1da2 Aggiornamento domini 2023-01-21 17:32:35 +00:00
Timmy89
65d4f50040 Update channels.json 2023-01-21 14:14:07 +01:00
marco
491fcd6af2 Fix cinemalibero e SC 2023-01-21 14:04:12 +01:00
github-actions[bot]
e78cfd6af5 Aggiornamento domini 2023-01-20 17:32:30 +00:00
marco
12016b2ccf Fix animeunity, cinemalibero sezione anime 2023-01-19 20:27:43 +01:00
github-actions[bot]
311e02af27 Aggiornamento domini 2023-01-19 17:33:17 +00:00
marco
9476c013f5 Migliorie riconoscimento server Cinemalibero e nuovo server HexUpload 2022-12-30 13:41:35 +01:00
marco
b7885ab100 Fix altadefinizione01 - alcuni server mancanti 2022-12-27 20:20:53 +01:00
marco
1f64b017ab Fix VVVVID 2022-12-20 20:06:27 +01:00
github-actions[bot]
201c4d8a53 Aggiornamento domini 2022-12-16 17:32:40 +00:00
mac12m99
a8569db0e1 Aggiornamento domini 2022-12-15 22:01:18 +01:00
github-actions[bot]
1ac9bd8bf3 Aggiornamento domini 2022-12-15 17:35:35 +00:00
Alhaziel01
619e0ef612 Fix Riproduzione AniPlay 2022-12-15 15:47:44 +01:00
Alhaziel01
f8ff7ca4f2 Fix Videoteca Aniplay 2022-12-15 12:15:21 +01:00
Alhaziel01
87efde1fa9 Fix Casacinema 2022-12-14 11:13:30 +01:00
Alhaziel01
b282c93f5a Fix AniPlay 2022-12-14 10:45:32 +01:00
github-actions[bot]
d4802a086c Aggiornamento domini 2022-12-13 17:32:52 +00:00
mac12m99
56c6273249 aggiornamento domini 2022-12-12 22:38:01 +01:00
marco
aa78772527 Riattivato filmstreaming, fix minori 2022-12-04 14:33:54 +01:00
marco
473d8f8758 Fix dropload, filmpertutti (sezione aggiornamenti), disattivato filmstreaming (da errore anche da browser) 2022-12-03 14:59:32 +01:00
marco
e9be40c944 Proxy anti CF 2022-12-03 11:46:30 +01:00
marco
58c04d009d Fix vvvvid 2022-12-03 11:30:12 +01:00
Alhaziel01
a6cbd81dbb Fix VOE 2022-12-02 12:46:12 +01:00
Alhaziel01
e94cea605e Fix Cinemalibero 2022-12-02 11:01:44 +01:00
Alhaziel01
262f97f90b Fix Ricerca in Altadefinizione 2022-12-02 10:19:29 +01:00
Alhaziel01
4644f3abb5 Eurostreaming - Momentaneamente Disabilitato 2022-12-02 10:06:50 +01:00
Alhaziel01
f1d0c5121d Fix Tmdb - Aggiunta alla videoteca di Film in Collezione 2022-12-02 09:52:43 +01:00
github-actions[bot]
5734e979a1 Aggiornamento domini 2022-11-24 17:33:48 +00:00
marco
6add80329b Nuovo dominio altadefinizione01 2022-11-21 18:55:05 +01:00
github-actions[bot]
a5dcd5ef26 Aggiornamento domini 2022-11-17 17:37:53 +00:00
Alhaziel01
1d1c2fc3df - Fix Finestra selezione Qualità
- Fix Redirect StreamingCommunity
2022-11-16 18:53:58 +01:00
Alhaziel01
7e3cb693ab StreamingCommunity Ripristinato Messaggio "Prossimamente" 2022-11-16 18:53:55 +01:00
marco
5cc6c78228 Streamingcommunity: non riprodurre ultimo contenuto quando il selezionato uscirà prossimamente 2022-11-16 18:53:51 +01:00
github-actions[bot]
112b03cbe0 Aggiornamento domini 2022-11-16 17:37:09 +00:00
github-actions[bot]
a711d11129 Aggiornamento domini 2022-11-13 17:37:51 +00:00
marco
d3ffad0b6f Fix ricerca casacinema 2022-11-07 20:13:02 +01:00
github-actions[bot]
7bc81a5314 Aggiornamento domini 2022-11-07 17:45:46 +00:00
marco
cc6a92eab7 Fix filmpertutti serietv, server VUP chiuso 2022-11-06 17:26:33 +01:00
marco
d8bc89728f animeuniverse è ora AnimeHDitalia, nascosti gli episodi non ancora caricati 2022-11-05 15:10:52 +01:00
Alhaziel01
073985223c VVVVID Fix contenuti con DRM 2022-11-03 18:18:46 +01:00
Alhaziel01
8fe8663f13 Fix Rinumerazione Toonitalia 2022-11-02 17:33:47 +01:00
Alhaziel01
9109411f6f Discovery + [free] - Aggiunto Warner TV 2022-10-31 09:50:02 +01:00
Alhaziel01
7a2321d5cc - Fix Chiusura Globalsearch
- Fix Ripresa punto di visione
2022-10-28 19:07:07 +02:00
Alhaziel01
23d652ebc5 Fix Autoplay e Altadefinizione 2022-10-25 09:06:21 +02:00
Alhaziel01
d9f8f9d6af Animeunity, rimosso server secondario 2022-10-24 19:43:12 +02:00
Alhaziel01
89250019f4 Fix gestione finestra server 2022-10-24 19:43:09 +02:00
Alhaziel01
3297e3b944 Fix voe, streamsb 2022-10-24 19:43:07 +02:00
marco
805cfa7b16 Fix streamlare, streamsb, toonitalia e modifiche interne 2022-10-24 19:43:04 +02:00
github-actions[bot]
d1de1ef91a Aggiornamento domini 2022-10-22 17:51:24 +00:00
marco
92dc3ae499 Riattivato tantifilm 2022-10-22 15:13:06 +02:00
Alhaziel01
e9ac7c8149 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2022-10-21 08:41:07 +02:00
Alhaziel01
9ccec23b59 Fix Navigazione in Ricerca globale se un video è in riproduzione 2022-10-21 08:40:38 +02:00
github-actions[bot]
14c7341c62 Aggiornamento domini 2022-10-20 18:05:53 +00:00
Alhaziel01
78e0e3fe22 Fix AccuRadio 2022-10-20 19:25:12 +02:00
marco
be8dc944dc Nuovo server: dropload e fix puntate sfasate eurostreaming_actor 2022-10-17 19:40:29 +02:00
marco
0e184c8832 Nuovo dominio shotener 2022-10-13 21:08:17 +02:00
marco
8960e36148 Aggiornamento dominio eurostreaming 2022-10-13 21:08:15 +02:00
Alhaziel01
5d2d259b2a Fix download Elementum 2022-10-13 20:34:52 +02:00
Alhaziel01
62530891e5 Fix SetreamSB 2022-10-13 20:34:49 +02:00
Alhaziel01
b1badd809e Fix Pluto TV 2022-10-13 20:34:46 +02:00
Alhaziel01
33a21786bf Aggiornamento StreamingCommunity 2022-10-05 18:25:24 +02:00
Alhaziel01
d6bfc4f5b4 Fix SC 2022-10-03 09:23:40 +02:00
github-actions[bot]
900a57d230 Aggiornamento domini 2022-10-02 17:48:50 +00:00
Alhaziel01
1e9c8debcb Fix SC 2022-10-01 19:48:04 +02:00
Alhaziel01
754ebcab61 Fix Aggiunta in Videoteca Film o Serie TV che cominciano con il punto 2022-10-01 16:15:28 +02:00
Alhaziel01
c38097efde Fix SC e AU 2022-10-01 15:57:50 +02:00
Alhaziel01
90becb9d37 Fix SC 2022-10-01 12:26:05 +02:00
Alhaziel01
cdbdec3481 Fix certifi per Kodi 18 (potrebbe richiedere reinstallazione) 2022-09-29 18:35:08 +02:00
Alhaziel01
009aa1abce Aggiornamento certifi (Fix Mediaset) 2022-09-28 19:01:05 +02:00
Alhaziel01
a499bd7cf1 Fix StreamingCommunity e AnimeUnity 2022-09-28 10:32:28 +02:00
marco
e57e4410f7 Fix eurostreaming_actor 2022-09-26 20:57:33 +02:00
Alhaziel01
bf56aa914f Fix streamingcommunity e animeunity 2022-09-26 18:18:49 +02:00
marco
3511757413 Cambio dominio SCWS -> fix streamingcommunity e animeunity 2022-09-25 14:16:47 +02:00
marco
fef67c2bbd Prova inspect.stack senza contesto 2022-09-24 11:25:31 +02:00
marco
4987901909 Sostituito parzialmente inspect.stack 2022-09-24 11:25:28 +02:00
github-actions[bot]
d41d500432 Aggiornamento domini 2022-09-23 17:53:58 +00:00
github-actions[bot]
4027c0e4da Aggiornamento domini 2022-09-21 17:52:52 +00:00
marco
b59b6042ec Fix ricerca discoveryplus e filmpertutti, aggiornamento episodi eurostreaming 2022-09-18 14:53:22 +02:00
marco
164efd8af7 KoD 1.7.6
- fix di routine ai canali/server
- disabilitati cb01anime e tantifilm
- aggiunta opzione mostra server nel menu contestuale della libreria
- più opzioni per quanto riguarda l'aggiornamento della videoteca
2022-09-15 19:15:51 +02:00
github-actions[bot]
749b54a772 Aggiornamento domini 2022-09-13 17:51:48 +00:00
github-actions[bot]
75074f3849 Aggiornamento domini 2022-09-03 17:40:36 +00:00
marco
e8bf10d73f Ridotta scadenza cache DNS -> 1h 2022-08-26 20:46:03 +02:00
github-actions[bot]
826bf3aea4 Aggiornamento domini 2022-08-11 17:39:57 +00:00
Alhaziel01
4f0860ce65 Fix Altadefinizione: Film in Videoteca 2022-08-04 16:52:47 +02:00
marco
8aed32996f Fix lag ricerca su ATV quando avviata da sezioni TMDB 2022-08-03 19:37:57 +02:00
Alhaziel01
5905839d50 Fix Canale Url 2022-08-02 19:01:57 +02:00
Alhaziel01
4adc535fd9 Fix Ok.ru 2022-08-02 19:01:56 +02:00
marco
b0e49c0446 Riattivato HD4ME 2022-08-01 19:15:19 +02:00
marco
77b2b3949b Disattivati canali offline da tempo (guardaserieclick, guardaserieicu, hd4me, italiaserie, piratestreaming, serietvu) 2022-07-31 20:29:44 +02:00
Alhaziel01
aaa6c78ace Sottotitoli Community Channels 2022-07-30 15:29:21 +02:00
Alhaziel01
1a00926155 Fix Cinemalibero (Wrestling) 2022-07-28 08:36:41 +02:00
Alhaziel01
7a9a0c977a Fix per Kodi 18 2022-07-28 08:36:34 +02:00
github-actions[bot]
fb1b0ab0a8 Aggiornamento domini 2022-07-26 17:47:10 +00:00
github-actions[bot]
dcbdb7ad35 Aggiornamento domini 2022-07-25 17:49:35 +00:00
github-actions[bot]
56e692796b Aggiornamento domini 2022-07-21 17:48:42 +00:00
github-actions[bot]
976b7ece3b Aggiornamento domini 2022-07-13 17:49:06 +00:00
Alhaziel01
49a400d599 Fix rinumerazione Toonitalia 2022-07-12 14:56:44 +02:00
Alhaziel01
1b1e65f7d7 - Fix VVVVID
- Ricerca Informazioni in sezione Novità
 - Fix Toonitalia
 - Fix Voci sottomenu CB01 e aggiunto tag 3D, se presente
 - Fix episodio successivo per episodi > 100
 - Fix Community Channels
 - Selezione scraper (TMDb TvShows default)
2022-07-12 11:22:40 +02:00
github-actions[bot]
ad20a975e9 Aggiornamento domini 2022-07-09 17:41:41 +00:00
github-actions[bot]
0a0625202b Aggiornamento domini 2022-07-08 17:50:37 +00:00
github-actions[bot]
fc41ca5d34 Aggiornamento domini 2022-07-01 17:44:04 +00:00
github-actions[bot]
f11d65b0ae Aggiornamento domini 2022-06-29 18:05:37 +00:00
github-actions[bot]
898abc3e22 Aggiornamento domini 2022-06-25 17:42:06 +00:00
github-actions[bot]
6099add363 Aggiornamento domini 2022-06-24 17:45:43 +00:00
github-actions[bot]
88f230eec3 Aggiornamento domini 2022-06-23 17:46:58 +00:00
marco
979eca1f33 Nuovi domini streamlare e streamsb, possibile fix streamsb 2022-06-22 20:22:30 +02:00
github-actions[bot]
e838bb9cfe Aggiornamento domini 2022-06-16 17:42:38 +00:00
github-actions[bot]
bdcd2c359d Aggiornamento domini 2022-06-15 17:40:43 +00:00
github-actions[bot]
6050e0267c Aggiornamento domini 2022-06-14 17:41:40 +00:00
github-actions[bot]
6ef17fc659 Aggiornamento domini 2022-06-10 17:41:32 +00:00
github-actions[bot]
658ef937d4 Aggiornamento domini 2022-06-09 17:45:50 +00:00
github-actions[bot]
9b3a03f570 Aggiornamento domini 2022-06-08 17:41:08 +00:00
Dexter Morgan
3764c66054 Eurostreaming_Actor: fix per range anno 2022-06-07 21:06:39 +02:00
Dexter Morgan
b672f37059 Aggiunto canale eurostreaming.actor 2022-06-07 19:51:45 +02:00
marco
138d480e1c Reinserito settaggio per disattivare notifica trakt 2022-05-30 19:58:36 +02:00
github-actions[bot]
a99f3585ee Aggiornamento domini 2022-05-28 17:40:37 +00:00
marco
9670a8c469 Fix filmstreaming casi particolari, nuovo server goodstream 2022-05-28 18:47:23 +02:00
marco
23bc63d635 Fix filmstreaming 2022-05-28 12:05:49 +02:00
marco
02239be925 KoD 1.7.5
- Aggiunti nuovi canali: 1337x e filmstreaming
- fix cinemalibero, altadefinizione01
- workaround per puntate non funzionanti quando si aggiorna la videoteca
2022-05-28 11:48:55 +02:00
github-actions[bot]
080bb798a1 Aggiornamento domini 2022-05-27 17:45:40 +00:00
github-actions[bot]
aeccbe0030 Aggiornamento domini 2022-05-16 17:47:46 +00:00
github-actions[bot]
f5c01330fb Aggiornamento domini 2022-05-13 17:48:40 +00:00
github-actions[bot]
3003567267 Aggiornamento domini 2022-05-08 17:41:33 +00:00
Alhaziel01
a7f4e8bf67 Fix Discovery + 2022-05-05 16:06:55 +02:00
marco
17f103aebd Fix alternativo a redirect 2022-05-04 21:11:41 +02:00
github-actions[bot]
277701c90c Aggiornamento domini 2022-04-30 17:39:43 +00:00
github-actions[bot]
bac35549d2 Aggiornamento domini 2022-04-29 17:42:23 +00:00
github-actions[bot]
9d67db9cef Aggiornamento domini 2022-04-28 17:42:00 +00:00
marco
9249c637eb Fix Cerca con KoD se si usa la vecchia ricerca 2022-04-28 18:31:50 +02:00
github-actions[bot]
323ffdb802 Aggiornamento domini 2022-04-27 17:46:36 +00:00
github-actions[bot]
0bac6ea333 Aggiornamento domini 2022-04-22 17:49:55 +00:00
Alhaziel01
ea59908712 Fix Aniplay 2022-04-20 19:22:41 +02:00
Alhaziel01
3593d8b316 Fix Redirect Dominio 2022-04-20 19:22:35 +02:00
Alhaziel01
ea2aa8c95b Fix Redirect Dominio 2022-04-20 19:22:31 +02:00
Alhaziel01
499326384b Fix Ricerca IMDB 2022-04-20 19:22:24 +02:00
Alhaziel01
cf204329d6 Fix Altadefinizione 2022-04-20 19:22:14 +02:00
github-actions[bot]
ce3a780711 Aggiornamento domini 2022-04-12 17:45:31 +00:00
github-actions[bot]
5dcf5455c5 Aggiornamento domini 2022-04-11 17:39:47 +00:00
Alhaziel01
3769fb9607 Rimozione blocco plugin esterni 2022-04-08 16:03:40 +02:00
github-actions[bot]
380a9665b4 Aggiornamento domini 2022-04-07 17:37:47 +00:00
marco
a6e63f0763 Fix streamlare 2022-04-03 13:41:01 +02:00
marco
bb6de9953c Fix dirette mediaset 2022-04-03 13:40:02 +02:00
Alhaziel01
ee58f6128d Blocco addon-esterni 2022-04-02 16:51:19 +02:00
marco
158392f892 Aggiornamento domini 2022-03-30 20:36:35 +02:00
github-actions[bot]
6c5b1e5725 Aggiornamento domini 2022-03-26 17:44:11 +00:00
github-actions[bot]
c9ee86d225 Aggiornamento domini 2022-03-25 17:38:38 +00:00
github-actions[bot]
eb3663487b Aggiornamento domini 2022-03-24 17:46:55 +00:00
github-actions[bot]
f00bd90c5d Aggiornamento domini 2022-03-13 17:36:52 +00:00
github-actions[bot]
f995310f93 Aggiornamento domini 2022-03-12 17:36:32 +00:00
github-actions[bot]
fed15ea125 Aggiornamento domini 2022-03-11 17:38:39 +00:00
Alhaziel01
93151eff4e Fix Community Channels 2022-03-07 20:13:15 +01:00
github-actions[bot]
6ca3fb9e6b Aggiornamento domini 2022-03-07 17:37:51 +00:00
Alhaziel01
b6cdc6bc92 Fix Discovery + 2022-03-05 19:03:48 +01:00
Alhaziel01
93016aa588 Fix Tempo di Visione Episodi 2022-03-05 19:03:37 +01:00
Alhaziel01
8e22e76e35 Fix DoodStream 2022-02-28 19:05:00 +01:00
Alhaziel01
50d80b73dc - Aggiunto Canale Altadefinizione
- Fix Aggiungi alla Videoteca da Ricerca Globale
 - Miglior gestione riapertura finestra server
 - Fix Gestione server Bloccati
2022-02-26 17:01:51 +01:00
github-actions[bot]
cbc939fed8 Aggiornamento domini 2022-02-25 17:36:05 +00:00
github-actions[bot]
7876c0f5fb Aggiornamento domini 2022-02-24 17:36:10 +00:00
marco
8fa9536f96 Nuovo dominio streamingITA 2022-02-23 20:34:05 +01:00
Alhaziel01
d5ec77ad06 - Fix finestra server
- Fix lista episodi da ricerca globale
2022-02-18 18:03:31 +01:00
Alhaziel01
57bd9c5d2e Fix Riapertura finestra server in caso di errore 2022-02-17 17:37:13 +01:00
marco
fea10c1a15 Fix pagina successiva da ricerga globale -> altre opzioni 2022-02-15 18:35:11 +01:00
marco
41e0823f62 KoD 1.7.4
- Nuove visualizzazioni Server\n- Fix Gestione Viste\n- Aggiunto Pluto TV\n- Fix e migliorie varie\n\n
2022-02-12 12:54:50 +01:00
marco
fa99565e9f Fix streamlare 2022-02-09 20:30:53 +01:00
github-actions[bot]
9c34eaf221 Aggiornamento domini 2022-02-08 17:45:28 +00:00
github-actions[bot]
d2a43c8b81 Aggiornamento domini 2022-02-06 17:43:59 +00:00
github-actions[bot]
905305cadb Aggiornamento domini 2022-02-05 17:39:44 +00:00
github-actions[bot]
40334b88fb Aggiornamento domini 2022-02-04 17:42:07 +00:00
github-actions[bot]
121fdeef9b Aggiornamento domini 2022-02-02 17:46:04 +00:00
github-actions[bot]
52a7c70c46 Aggiornamento domini 2022-01-31 17:44:21 +00:00
Alhaziel01
61cbc057b4 Fix Sottotitoli StreamingCommunity 2022-01-26 11:42:47 +01:00
github-actions[bot]
1719cbf746 Aggiornamento domini 2022-01-20 17:45:44 +00:00
Alhaziel01
93ec9cd2da - Fix Canali Live
- Fix errore in tutti i canali
2022-01-19 21:46:17 +01:00
Alhaziel01
ed899cbd44 Aggiunto Pluto TV 2022-01-19 20:09:03 +01:00
Alhaziel01
07d60d0c92 Disattivato Paramount Network 2022-01-19 19:45:42 +01:00
marco
b619105677 Merge remote-tracking branch 'origin/stable' into stable 2022-01-17 20:29:50 +01:00
marco
2927263065 Fix streamingcommunity 2022-01-17 20:29:41 +01:00
github-actions[bot]
2f17e9144f Aggiornamento domini 2022-01-16 17:50:55 +00:00
github-actions[bot]
7d9d6f0381 Aggiornamento domini 2022-01-15 17:48:47 +00:00
Alhaziel01
a490f48a78 Fix Streaming Community 2022-01-15 12:11:05 +01:00
github-actions[bot]
b7497592c9 Aggiornamento domini 2022-01-14 17:49:51 +00:00
marco
842bbb7c1f Fix ordinamento episodi tantifilm 2022-01-14 18:38:13 +01:00
github-actions[bot]
566392db46 Aggiornamento domini 2022-01-13 17:51:28 +00:00
Alhaziel01
3f7b34f844 Disabilitato Maxstream 2022-01-13 15:22:02 +01:00
marco
827573b596 Merge remote-tracking branch 'origin/stable' into stable 2022-01-12 20:30:51 +01:00
marco
7b491eb679 Disattivato il genio, fix ricerca ilgenio_cam 2022-01-12 20:30:43 +01:00
marco
cd9a162fb5 Fix streamingcommunity 2022-01-09 17:39:12 +01:00
marco
62cc72849b Opzioni menu contestuale aggiuntive nei miei link e nei preferiti 2022-01-02 14:54:18 +01:00
marco
bb29243f03 Fix streamingcommunity 2021-12-31 13:46:00 +01:00
marco
06f711b6b9 Disattivato tapmovie(chiuso) 2021-12-30 20:21:33 +01:00
Alhaziel01
d90a0ba308 Fix Mediaset Play 2021-12-30 20:18:47 +01:00
Alhaziel01
8a710dcdb3 Fix dailymotion 2021-12-30 20:18:17 +01:00
marco
7abf12696c Fix get_season_and_episode 2021-12-30 20:18:14 +01:00
marco
3a251b2247 Miglioria commit precedente 2021-12-30 20:18:08 +01:00
marco
1eb23954fd Fix parziale cinemalibero (SerieTV) 2021-12-30 20:18:05 +01:00
Alhaziel01
968414dd23 Fix Tantifilm 2021-12-30 20:17:59 +01:00
Alhaziel01
7a1e51b290 Fix Discovery 2021-12-30 20:17:54 +01:00
Alhaziel01
b3e8341457 Disabilitato Altadefinizione Community 2021-12-30 20:17:52 +01:00
marco
265992f9ba Cambiato ordinamento menu contestuale 2021-12-25 19:11:25 +01:00
github-actions[bot]
19720a5617 Aggiornamento domini 2021-12-18 17:47:10 +00:00
github-actions[bot]
f4e039ea03 Aggiornamento domini 2021-12-17 17:46:57 +00:00
github-actions[bot]
67468dae87 Aggiornamento domini 2021-12-16 17:47:32 +00:00
marco
0c7b2056eb Riattivati guardaseriecam e ilgeniodellostreaming_cam 2021-12-15 20:27:03 +01:00
github-actions[bot]
962e4b0504 Aggiornamento domini 2021-12-15 17:48:13 +00:00
github-actions[bot]
22ef52888b Aggiornamento domini 2021-12-09 17:49:30 +00:00
Alhaziel01
b57cd7edb8 Fix StreamingITA 2021-12-01 19:41:57 +01:00
Alhaziel01
95eaf0060b Miglioria VVVVID 2021-12-01 19:40:04 +01:00
Alhaziel01
121d0558b2 Fix canali o server vuoti 2021-12-01 19:39:13 +01:00
Alhaziel01
70f5cd0d4e Fix File .nfo in libreria 2021-12-01 19:39:09 +01:00
marco
7eebb48b89 KoD 1.7.3
- fix vari\n\n
2021-11-27 18:08:14 +01:00
Alhaziel01
b0325f61d6 FIx Live e icone canali mancanti MediasetPlay 2021-11-26 17:31:00 +01:00
Alhaziel01
d644d66e43 Fix La7 2021-11-26 17:30:48 +01:00
Alhaziel01
ce81cf0da8 Fix Ultimi Episodi AnimeWorld 2021-11-26 16:34:44 +01:00
github-actions[bot]
1b904eec20 Aggiornamento domini 2021-11-23 17:43:58 +00:00
github-actions[bot]
9f75185517 Aggiornamento domini 2021-11-19 17:43:15 +00:00
github-actions[bot]
5dddc9bd75 Aggiornamento domini 2021-11-18 17:41:19 +00:00
github-actions[bot]
f18a7b6f85 Aggiornamento domini 2021-11-17 17:40:07 +00:00
marco
28d9ee2176 streamingcommunity URL statico 2021-11-16 21:15:37 +01:00
marco
88778aef1a Miglioria streamSB 2021-11-16 21:10:17 +01:00
Alhaziel01
efd1c8d872 Nomi Corretti Nuovi Server 2021-11-16 21:10:08 +01:00
marco
c90916e367 fix ricerca guardaserieicu, prova timeout globale (requests) 2021-11-16 21:10:02 +01:00
marco
faa51a5477 Aggiunti server streamlare (by alpha) e streamsb(by urlresolver) 2021-11-15 19:23:10 +01:00
Alhaziel01
897f27deb6 Fix Audio Assente Raiplay 2021-11-11 17:33:03 +01:00
marco
941d8f2236 KoD 1.7.2
- corretto il playback in tutti i casi (torrent, autoplay, videoteca, libreria ecc..)\n- piccole migliorie prestazionali nella ricerca globale\n- fix trailer\n\n
2021-11-08 19:50:51 +01:00
github-actions[bot]
9c7c41d44a Aggiornamento domini 2021-11-06 17:41:22 +00:00
Alhaziel01
69480575e1 Fix Visualizzazioni 2021-11-05 09:57:44 +01:00
github-actions[bot]
a49e6b5e07 Aggiornamento domini 2021-11-04 17:43:35 +00:00
mac12m99
f7a783be9c Update maxstream.py 2021-11-04 18:31:50 +01:00
mac12m99
8194f6dc97 Fix maxstream (vediamo quanto regge) 2021-11-04 14:49:08 +01:00
mac12m99
d2067983a3 Fix streamingcommunity per quando cambia url 2021-11-04 11:20:37 +01:00
mac12m99
8143ec7dca Fix paramount serie tv 2021-11-04 10:51:47 +01:00
Alhaziel01
d4ab965974 Fix Riproduzione e Download Torrent 2021-11-03 19:58:29 +01:00
github-actions[bot]
f3f86991c9 Aggiornamento domini 2021-11-03 17:43:31 +00:00
Alhaziel01
7a6ad81003 Fix Streaming community 2021-10-29 19:18:02 +02:00
Alhaziel01
b807b41f48 Probabile Fix Autoplay Torrent 2021-10-29 16:51:34 +02:00
Alhaziel01
e74337c07b Fix Installazione Elementum 2021-10-29 11:46:28 +02:00
mac12m99
c65c924d1f Fix guardaserieicu 2021-10-27 20:33:43 +02:00
mac12m99
324d6c00f8 Fix tantiflm, animealtadefinizione utiltimi ep e nascosti ep non ancora caricati 2021-10-27 20:33:41 +02:00
mac12m99
1d32b43176 Fix dreamsub ultimi episodi 2021-10-24 14:08:54 +02:00
mac12m99
9734b92502 Fix ricerca globale 2021-10-23 18:42:35 +02:00
Alhaziel01
0976a8581b - Aggiornato Anavids
- Fix m3u8 non supportati da inputstream
 - Stayonline
2021-10-23 11:36:48 +02:00
Alhaziel01
5a89b60ce8 Fix Animeforce 2021-10-22 19:14:16 +02:00
mac12m99
d48ca59d62 Fix maxstream 2021-10-22 19:03:51 +02:00
Alhaziel01
00d5da1083 Fix Ricerca globale 2021-10-21 11:43:54 +02:00
mac12m99
638180d4e9 Miglioria ilcorsaronero->tag qualità 2021-10-20 20:46:49 +02:00
mac12m99
546008c375 Miglioria ilcorsaronero: titolo completo tramite URL 2021-10-20 20:46:47 +02:00
Alhaziel01
2a7103270c Fix AnimeUnity 2021-10-20 20:15:21 +02:00
github-actions[bot]
9729ff2cf4 Aggiornamento domini 2021-10-20 17:42:38 +00:00
Alhaziel01
3aca8ec683 Fix Userload 2021-10-20 19:29:28 +02:00
mac12m99
2ccfa4f38a Supporto isecure.link (casacinema) 2021-10-20 18:31:23 +02:00
Alhaziel01
a32fe8c41f Fix Ricerca per Attori (Ricerca Classica) 2021-10-20 18:19:13 +02:00
mac12m99
35e0016050 Fix dreamsub 2021-10-19 20:35:06 +02:00
Alhaziel01
4e592031b0 Fix AnimeUnity 2021-10-19 20:34:32 +02:00
github-actions[bot]
cf4e60719d Aggiornamento domini 2021-10-19 17:42:08 +00:00
github-actions[bot]
84fbb53e8a Aggiornamento domini 2021-10-18 17:43:08 +00:00
mac12m99
bfc5657d45 Fix maxstream 2021-10-18 19:20:21 +02:00
mac12m99
e31e4190da Fix maxstream 2021-10-18 17:40:55 +02:00
mac12m99
e82c1b3bd8 Fix maxstream (support CAPTCHA) 2021-10-17 11:12:32 +02:00
Alhaziel01
9b201a49ef Fix streaming Community, in caso di cambio dominio 2021-10-16 16:02:12 +02:00
mac12m99
bf04517b78 Fix maxstream 2021-10-16 15:33:03 +02:00
Alhaziel01
18055e430a Fix titoli Streaming Community 2021-10-16 11:23:20 +02:00
Alhaziel01
11f5a1a56b Fix Maxstream 2021-10-16 10:02:02 +02:00
mac12m99
8709a1f865 Maxstream check codice video 2021-10-15 20:03:26 +02:00
Alhaziel01
dbc12dab8e Riattivato Maxtream 2021-10-15 17:06:47 +02:00
Alhaziel01
104de6e690 Fix Maxstream 2021-10-15 16:57:02 +02:00
Alhaziel01
f133d535ea Disabilitato momentaneamente Maxstream 2021-10-15 16:17:23 +02:00
Alhaziel01
52a7b9eef2 Fix Streaming Community 2021-10-15 12:43:12 +02:00
github-actions[bot]
3c28cb42ae Aggiornamento domini 2021-10-14 17:43:18 +00:00
Alhaziel01
009dec2b24 Fix Altadefinizione Community 2021-10-14 15:33:50 +02:00
Alhaziel01
ed7992ce3f Fix Mediaset Play, diretta "20" 2021-10-14 15:31:18 +02:00
Alhaziel01
d7235cb631 Fix altadefinizione community 2021-10-11 21:01:09 +02:00
mac12m99
68f5b13d67 Fix maxstream 2021-10-08 19:27:26 +02:00
Alhaziel01
cb9fc5b5f3 Fix AnimeForce 2021-10-07 19:53:27 +02:00
Alhaziel01
a7f0571b5b Rai Play: Ordine corretto 2021-10-07 19:53:22 +02:00
Alhaziel01
ce7dea395e Fix Repaly Rai Play 2021-10-06 19:41:51 +02:00
Alhaziel01
19d43c2b17 Altadefinizione Community Risoluzione "Veritiera" 2021-10-06 15:08:07 +02:00
Alhaziel01
775b8745f8 Probabile Fix Blocco Ricerca Globale 2021-10-06 14:54:05 +02:00
Alhaziel01
d6b19b2966 Fix Streaming Community 2021-10-06 14:53:43 +02:00
Alhaziel01
185981dd55 Fix Altadefinizione Community 2021-10-06 14:53:32 +02:00
Alhaziel01
4dfab34e1f Fix Selezione Viste 2021-10-06 14:53:10 +02:00
mac12m99
d0b1734f52 Piccolo fix adesso in onda 2021-10-05 20:34:21 +02:00
Alhaziel01
e8f45d0cbe Fix Successivo in altre opzioni di ricerca 2021-10-05 20:34:18 +02:00
github-actions[bot]
e64fcc4b27 Aggiornamento domini 2021-10-03 17:39:30 +00:00
mac12m99
65f9941714 altacomm: rimossa richiesta registrazione 2021-10-02 14:45:21 +02:00
mac12m99
0b86272635 Disattivato altadefinizioneclick (unito al community), fix altacomm e cineblog per certi tipi di serie 2021-10-02 12:21:18 +02:00
Alhaziel01
1e95d88097 Fix Altadefiniziona Community 2021-10-02 12:21:16 +02:00
Alhaziel01
18958830d9 Ripristiono Vista Touch 2021-10-01 19:45:40 +02:00
github-actions[bot]
f8a8755e31 Aggiornamento domini 2021-10-01 17:42:23 +00:00
Alhaziel01
6743b37af9 Fix TMDB 2021-10-01 19:36:53 +02:00
Alhaziel01
4bfe1353b7 Rimossa voce ricerca Avanzata da Altadefinizione Click 2021-10-01 19:27:53 +02:00
Alhaziel01
eba84fcb67 Fix Altadefinizione Click 2021-10-01 15:50:26 +02:00
Alhaziel01
e7926bcea8 Fix Gestione Viste 2021-10-01 15:50:19 +02:00
Alhaziel01
11ed736134 - Fix Altadefinizione Community
- Fix HDMario
 - Fix Streamtape
2021-10-01 09:53:52 +02:00
Alhaziel01
1d2ecb05d3 Fix Altadefinizione Community 2021-09-30 20:32:42 +02:00
Alhaziel01
9094c5e845 Fix selezione tipo di vista 2021-09-30 19:48:10 +02:00
github-actions[bot]
894c35a9bf Aggiornamento domini 2021-09-30 17:41:27 +00:00
Alhaziel01
db475a6c16 Fix Altadefinizione Community 2021-09-29 17:18:51 +02:00
Alhaziel01
2c8dafa1b4 Fix AnimeSaturn 2021-09-29 12:16:50 +02:00
Alhaziel01
8ed5872305 piccolo fix 2021-09-29 09:08:19 +02:00
Alhaziel01
612921b61d Nuovo Server HighLoad 2021-09-28 15:34:35 +02:00
Alhaziel01
86e4bebb3c - Altadefinizione Community:
- divisione per anno (per Film e Serie TV)
   - disabilitata divisione per qualità (inutile)
   - preferenze di ordinamento (da configurazione canale)
 - Velocizzato tmdb + fix per rinumerazione anime
2021-09-28 09:10:13 +02:00
4l3x87
cca66c6cf6 Fix Turbovid (#345) 2021-09-27 20:37:03 +02:00
mac12m99
408bca028f Fix toonitalia, alcuni link su eurostreaming, aggiunto server VOE (by alfa) 2021-09-26 14:34:06 +02:00
github-actions[bot]
5e0dd6d7bb Aggiornamento domini 2021-09-22 17:41:55 +00:00
Alhaziel01
f5ff18ecd0 Fix Ricerca per Attori 2021-09-22 10:01:19 +02:00
mac12m99
c48ea215b0 Fix streamingcommunity e ordinamento decrescente per programmi tv mediaset 2021-09-21 19:55:09 +02:00
github-actions[bot]
891723faa5 Aggiornamento domini 2021-09-21 17:43:02 +00:00
github-actions[bot]
86999da8b6 Aggiornamento domini 2021-09-20 17:38:57 +00:00
mac12m99
a335f83194 Fix dirette mediaset 2021-09-19 11:36:35 +02:00
mac12m99
428968a04e KoD 1.7.1 2021-09-18 18:59:00 +02:00
github-actions[bot]
cca8672e2c Aggiornamento domini 2021-09-16 17:41:07 +00:00
Alhaziel01
a7bb1c2f64 Fix Deltabit 2021-09-15 20:13:50 +02:00
mac12m99
af53d5d474 Fix hdmario 2021-09-14 20:20:24 +02:00
mac12m99
48c2b25513 Fix sezioni novità per eurostreaming e tantifilm 2021-09-13 20:42:18 +02:00
mac12m99
c310e171eb altacomm: fix cerca serietv 2021-09-13 18:29:05 +02:00
mac12m99
1be7cda8d1 Disabilitato speedvideo (probabilmente chiuso o abbandonato) 2021-09-11 13:31:01 +02:00
mac12m99
20b032d640 Fix altaCommunity -> SerieTv mancanti + Deltabit 2021-09-09 18:29:59 +02:00
github-actions[bot]
788d4f3119 Aggiornamento domini 2021-09-08 17:44:54 +00:00
mac12m99
adafa74dbb Fix altaCommunity -> SerieTv mancanti 2021-09-08 17:50:45 +02:00
mac12m99
edd984555b Fix altaCommunity e streamtape 2021-09-07 20:54:23 +02:00
mac12m99
3a1e3a228b Fix altaCommunity e aggiungi alla videoteca con autoplay attivo su alcuni canali 2021-09-06 20:38:40 +02:00
Alhaziel01
b21f957c08 Fix Altadefinizione Community 2021-08-27 16:18:16 +02:00
github-actions[bot]
aa4ee64579 Aggiornamento domini 2021-08-26 17:44:58 +00:00
Alhaziel01
9374cecf7c Fix Altadefinizione Click 2021-08-26 12:41:04 +02:00
Alhaziel01
eb99cf3e75 Fix Altadefinizione Community 2021-08-25 19:35:07 +02:00
Alhaziel01
01d40a66ae Fix Riproduzione 2021-08-24 11:21:19 +02:00
Alhaziel01
5795116630 Fix Riproduzione da Videoteca 2021-08-23 18:37:21 +02:00
mac12m99
57c9ff54dd ops 2021-08-21 19:58:07 +02:00
mac12m99
06f1aa11f4 Test per freeze kodi da videoteca 2021-08-21 17:32:33 +02:00
mac12m99
bd249bb2c3 Fix streamtape 2021-08-19 11:56:52 +02:00
mac12m99
0abf9b0934 Fix altadefinizionecommunity -> ricerca 2021-08-17 17:08:19 +02:00
mac12m99
200878ab0c Fix altadefinizionecommunity 2021-08-17 12:56:37 +02:00
mac12m99
bbd6d8b287 Merge remote-tracking branch 'origin/stable' into stable 2021-08-16 20:55:23 +02:00
mac12m99
7404c8cbe2 DNS cache: rinnovo dopo 7 giorni 2021-08-16 20:55:16 +02:00
github-actions[bot]
4a8efbf06f Aggiornamento domini 2021-08-16 17:47:37 +00:00
Alhaziel01
c7eadf01b1 Fix Login Altadefinizione Community 2021-08-16 19:06:32 +02:00
mac12m99
ccad94275c casacinema: fix per alcuni contenuti 2021-08-16 19:03:54 +02:00
mac12m99
840e47be78 Fix streamtape, filmpertutti->aggiornamenti 2021-08-14 19:19:45 +02:00
github-actions[bot]
817bcbc996 Aggiornamento domini 2021-08-09 17:48:41 +00:00
Alhaziel01
e0e9252180 Altadefinizione Community:
- Fix Ricerca
 - Fix Generi / Qualità
2021-08-07 17:33:52 +02:00
mac12m99
87ca7f83c2 Fix vidmoly, aniplay(A-Z), rimossi serieHD e DSDA, rimosso player premium altacommunity 2021-08-07 17:33:02 +02:00
github-actions[bot]
02d6b2fa2a Aggiornamento domini 2021-08-04 17:47:09 +00:00
Alhaziel01
bf8d9d68b1 Fix Avvio KoD 2021-08-03 22:27:14 +02:00
mac12m99
25d4522e1c Fix aggiungi videoteca su altadefinizionecommunity->ricerca, tmdb e rimosso URL shortner per "apri nel browser" in mancanza di browser 2021-08-03 21:18:24 +02:00
Alhaziel01
c13b7fcda1 Fix Login Altadefinizione Community 2021-08-03 21:11:53 +02:00
Enrico Da Rodda
ec03cd1b60 Fix regex Casacinema 2021-08-03 21:11:48 +02:00
github-actions[bot]
f8a64af1a5 Aggiornamento domini 2021-08-03 17:50:38 +00:00
Alhaziel01
04af369e1b Fix AniPlay (Riaggiungere le serie in Videoteca) 2021-08-02 19:06:23 +02:00
github-actions[bot]
e22308e949 {Aggiornamento domini} 2021-07-29 17:43:34 +00:00
Alhaziel01
1a91aefce4 Logo Altadefinizione Community 2021-07-28 17:46:49 +02:00
github-actions[bot]
1b03981de0 {Aggiornamento domini} 2021-07-27 17:43:09 +00:00
github-actions[bot]
2b20883017 {Aggiornamento domini} 2021-07-25 17:41:33 +00:00
github-actions[bot]
6132df9fca {Aggiornamento domini} 2021-07-24 17:41:21 +00:00
mac12m99
0119903f7e Aggiunto canale AltadefinizioneCommunity 2021-07-24 16:03:25 +02:00
github-actions[bot]
eda3c533ad {Aggiornamento domini} 2021-07-24 09:01:01 +00:00
mac12m99
a6c833b3ec Fix episodi Toonitalia 2021-07-21 18:35:02 +02:00
mac12m99
03e609a2b7 Aggiornamento domini 2021-07-20 20:42:16 +02:00
mac12m99
5cc09dd34f Fix proxytranslate 2021-07-20 20:41:30 +02:00
Alhaziel01
a8ac567792 Fix Trakt 2021-07-13 16:01:00 +02:00
mac12m99
ef0c1c1189 Aggiornamento domini 2021-07-10 14:25:41 +02:00
Enrico Da Rodda
9823287251 Fix casacinema e altadefinizioneclick 2021-07-08 19:39:27 +02:00
Alhaziel01
2db35222a2 Riabilitato SerieHD 2021-07-08 15:38:30 +02:00
Alhaziel01
4105b29cd7 Fix AniPlay 2021-07-08 09:03:54 +02:00
Alhaziel01
6f58427757 Fix AniPlay 2021-07-07 20:08:44 +02:00
Alhaziel01
2eb2ee5ebe Aggiunto Server Streamon 2021-07-07 17:16:50 +02:00
Alhaziel01
11972f242f Fix streamZ 2021-07-07 17:14:10 +02:00
Alhaziel01
44a8f0cad0 - Aggiungi alla videoteca se non si sa se è un film o una serie 2021-07-07 17:13:27 +02:00
Alhaziel01
1762d5bf83 - Aggiunte Serie ad Altadefinizione Click
- Disattivato Serie HD
2021-07-07 17:13:22 +02:00
github-actions[bot]
9569b174e5 {Aggiornamento domini} 2021-07-07 17:13:18 +02:00
mac12m99
3535c26173 altadefinizioneclick: fix referer (mixdrop) 2021-07-04 14:28:10 +02:00
Alhaziel01
93ad0673c6 Fix Numerazione AniPlay 2021-07-01 10:19:23 +02:00
Alhaziel01
4fb7d2f5cb Aggiunto AniPlay 2021-07-01 10:07:19 +02:00
mac12m99
73bd742b73 Fix cb01 puntate in cartella esterna e riconoscimento TMDB con anno 2021-06-30 14:52:33 +02:00
Alhaziel01
d3cd2e4ee6 Fix Episodio Successivo 2021-06-28 16:33:44 +02:00
Alhaziel01
4abd89a558 Fix streamZ 2021-06-28 16:33:40 +02:00
mac12m99
c541177310 Cb01: esclusione definitiva articoli di servizio, toonitalia fix sezione aggiornamenti, piccole migliorie 2021-06-26 15:20:30 +02:00
fatshotty
1fe780d7a8 fix MaxStream 2021-06-25 19:26:25 +02:00
Alhaziel01
d4a82d00eb Fix VVVVID 2021-06-25 10:50:23 +02:00
mac12m99
005c13c60c Aggiornamento domini 2021-06-23 19:24:03 +02:00
mac12m99
315412aa1a Fix guardaserieclick e ricerca ilgenio 2021-06-23 19:23:37 +02:00
fatshotty
84a35b25d7 fix eurostreaming - aggiornamenti 2021-06-23 19:23:07 +02:00
mac12m99
e45fca832c Fix freeze kodi alla chiusura quando aggiornato service.py 2021-06-23 18:12:23 +02:00
mac12m99
fd135f1b6b Aggiunto server MaxStream 2021-06-20 18:21:50 +02:00
Alhaziel01
b6eab69a15 Server Preferiti fix in caso di valori errati 2021-06-17 17:50:37 +02:00
Alhaziel01
67dc8d858a Fix Selezione server preferiti 2021-06-17 17:10:16 +02:00
Alhaziel01
ae3815e892 Fix Autoplay 2021-06-17 16:38:53 +02:00
mac12m99
6773f10b69 Niente riordino lista server, per il momento 2021-06-16 20:30:51 +02:00
Alhaziel01
9956dbebda Fix Mancata apertura Canali in Kodi 18 2021-06-16 19:15:00 +02:00
fatshotty
996408108b Animeunity: supporto server StreamingCommunityWS 2021-06-16 18:24:38 +02:00
Alhaziel01
1949aac0a2 Fix Autoplay
(cherry picked from commit 8e020bb605)
2021-06-16 18:22:53 +02:00
Alhaziel01
097be756fe Fix Impostazioni server
(cherry picked from commit cf125d1e2b)
2021-06-16 18:22:50 +02:00
mac12m99
e55d9a070f Aggiornamento domini 2021-06-15 22:26:28 +02:00
Alhaziel01
6781e2eaaa Fix Rinumerazione all'aggiunta in videoteca 2021-06-11 12:29:30 +02:00
Alhaziel01
ca67bc4f9f Rimosso extended info, non compatibile con Kodi 19 2021-06-11 12:29:25 +02:00
Alhaziel01
3810f1a45b Fix Tmdb, Menu contestuale, Autoplay 2021-06-11 12:29:17 +02:00
mac12m99
114fed7f31 KoD 1.7
- Aggiunto menu globale opzioni di KoD\n- Aggiunto canale tapmovie e server annessi\n- Notifica quando il tipo di vista viene salvata (con indicazione del tipo di contenuto)\n\n
2021-06-10 17:40:17 +02:00
Alhaziel01
7c261c26b2 Fix Filmpertutti 2021-06-07 18:50:43 +02:00
Alhaziel01
5c441584a3 Fix unshorten linkup 2021-06-07 18:32:40 +02:00
Alhaziel01
82d1cac082 Fix AnimeUnity, HD4ME, ToonItalia 2021-06-07 18:32:26 +02:00
mac12m99
a3dac09a86 fix buckler.link, novità tantifilm 2021-06-05 17:14:29 +02:00
Alhaziel01
16272a5133 Aggiornamento Dominio Streaming Community 2021-06-04 16:21:40 +02:00
Alhaziel01
6a130d08da - Disattivato altadefinizione L
- Fix IlGenioDelloStreaming CAM
2021-06-01 19:47:59 +02:00
Alhaziel01
b646f4bbbc Fix unshortenit + nuovo path per mixdrop 2021-06-01 19:47:47 +02:00
Alhaziel01
06a8ca1a27 Fix HD4ME 2021-06-01 18:09:07 +02:00
Alhaziel01
081da7e42a Fix Mega 2021-06-01 18:09:00 +02:00
Alhaziel01
a4ccfd45aa Forza controllo url di hdpass (Fix "HDMario" SerieHD) 2021-06-01 18:08:54 +02:00
Alhaziel01
21dadd88b8 - Miglioria NinjaStream
- Fix Vup
2021-06-01 11:42:50 +02:00
Alhaziel01
abf92a832f DooD Stream, Fix Kodi 18 2021-06-01 09:20:56 +02:00
Alhaziel01
e7038d0e58 Fix OkStream 2021-05-31 18:40:07 +02:00
Alhaziel01
21feea854e Fix AnimeWorld 2021-05-31 18:14:19 +02:00
Alhaziel01
949f398a64 Fix DooD Stream 2021-05-31 18:13:33 +02:00
Alhaziel01
f7e4d1568b Fix Ninjastream 2021-05-31 18:13:19 +02:00
mac12m99
d63cda3206 ops
(cherry picked from commit 01235a9b97)
2021-05-30 20:49:08 +02:00
mac12m99
564736c703 Disattivati wstream, backin, vidtome e vcrypt (chiusi)
(cherry picked from commit 49c03638df)
2021-05-30 20:46:02 +02:00
Alhaziel01
cfd0038ed8 Fix CB01 2021-05-29 18:36:24 +02:00
mac12m99
ebfc6df299 Disattivati mondoserietv, serietvonline, toonitalia e mystream
Tantifilm nuovo indirizzo
2021-05-28 18:39:36 +02:00
mac12m99
98b40bd16d Aggiornamento domini 2021-05-21 20:39:29 +02:00
mac12m99
d3a2fa5702 Cache tmdb: riprova se risposta vuota 2021-05-17 20:17:15 +02:00
Alhaziel01
5923f697ea Fix Cb01 2021-05-17 18:52:28 +02:00
mac12m99
56e9489c18 Fix sezione novità (cb01) 2021-05-16 17:57:40 +02:00
mac12m99
e09a6a2e96 CB01: migliorato riconoscimento titoli in sezione Ultimi aggiunti 2021-05-15 13:08:56 +02:00
mac12m99
c3f84281f8 fix wstream e animesaturn 2021-05-15 10:38:56 +02:00
mac12m99
7f33326403 Fix cb01->ultimi/e aggiornati/e, wstream e piccole migliorie 2021-05-14 20:43:39 +02:00
mac12m99
06a2f81737 Fix grafico "cerca negli altri canali" 2021-05-14 20:43:36 +02:00
mac12m99
70ffcec833 Ottimizzazione init db 2021-05-13 20:47:36 +02:00
Alhaziel01
47d10b1c68 Fix Animeworld 2021-05-13 12:30:01 +02:00
mac12m99
29120c7be9 Riattivato DSDA (di nuovo ON) 2021-05-07 17:32:47 +02:00
mac12m99
be9832e934 Fix vcrypt e wstream 2021-05-06 21:24:40 +02:00
mac12m99
36ea5be436 Miglioria animeworld 2021-05-05 21:01:56 +02:00
mac12m99
4841c9c08f Disattivato DSDA(chiuso) 2021-05-04 21:00:38 +02:00
Alhaziel01
7c49b7cda6 Fix AnimeWorld 2021-05-03 16:35:51 +02:00
Alhaziel01
b72b258c22 Fix AnimeWorld 2021-05-03 09:43:34 +02:00
mac12m99
417cc83732 Migliorie ricerca globale 2021-05-01 21:28:14 +02:00
mac12m99
12690dfbb9 Fix ilgenio (link in re-upload), cb01 caso speciale serie e soppressione errori nei canali in videoteca 2021-05-01 14:56:09 +02:00
mac12m99
a895bd04fc Cb01 nel checkdns 2021-04-29 18:05:20 +02:00
mac12m99
b5c7974941 Piccolo fix casacinema e speedvideo 2021-04-29 17:57:13 +02:00
Enrico Da Rodda
fa01a6a914 Fix altadefinizione01 e click 2021-04-29 17:56:50 +02:00
Alhaziel01
24fccf7b9f Fix La7 2021-04-29 16:59:37 +02:00
Alhaziel01
c5153ae4df Fix AnimeUnity 2021-04-29 09:27:19 +02:00
mac12m99
b70aeb5191 KoD 1.6.3
- Corretto blocco nella ricerca globale\n- migliorie e fix vari ai canali e al core\n
2021-04-28 21:52:53 +02:00
Alhaziel01
73cb2b9d6a Fix AnimeWorld, supporto episodi uniti 2021-04-23 09:36:08 +02:00
Alhaziel01
32e9723eaf Fix TMDB (film) 2021-04-23 09:12:04 +02:00
mac12m99
7da77fe914 Fix download su smb 2021-04-22 20:07:43 +02:00
Alhaziel01
4191f8f524 Fix Animesaturn 2021-04-22 20:07:14 +02:00
Alhaziel01
4dfb38743f Piccoli fix Episodio Successivo 2021-04-22 20:07:09 +02:00
Alhaziel01
ccbc556914 Fix TMDB 2021-04-22 20:06:51 +02:00
Alhaziel01
8eb6e92bd6 Miglioprie tmdb e autorenumber 2021-04-22 20:06:39 +02:00
Alhaziel01
d724582087 Fix filetools 2021-04-22 20:06:32 +02:00
Alhaziel01
f4e84740e4 fix animeunity 2021-04-22 20:06:27 +02:00
Alhaziel01
d5a1c14cea Fix VVVVID (Generi) 2021-04-22 20:06:22 +02:00
mac12m99
0ac13cfcaf Fix ilgeniodellostreaming 2021-04-20 21:03:22 +02:00
mac12m99
673dc5c540 Fix registrazione HDmario 2021-04-18 20:15:44 +02:00
mac12m99
da6f697e8d Rimosso subtitletools e workaround in service 2021-04-18 16:46:03 +02:00
mac12m99
4545075ba3 Fix paramount e tunein 2021-04-18 14:11:51 +02:00
mac12m99
e21b58e82b Sleep inutile in service.onSettingsChanged 2021-04-16 23:08:40 +02:00
Alex
3dce05c62e Fix impostazioni + modifiche minori 2021-04-16 23:08:37 +02:00
github-actions[bot]
9257d8e553 {Aggiornamento domini} 2021-04-11 17:20:56 +02:00
mac12m99
e3eae9b52b Fix tantifilm 2021-04-11 17:20:52 +02:00
mac12m99
17e6063102 Fix per cartelle di rete 2021-04-11 15:24:35 +02:00
mac12m99
38868b4885 Altadefinizione01 è .games -> cambio dominio e aggiustamenti 2021-04-11 15:24:32 +02:00
mac12m99
f36ebb1ad9 Cerca negli altri canali: cerca subito se c'è tmdb_id
(cherry picked from commit 788b7be621)
2021-04-10 16:28:48 +02:00
mac12m99
65ce9df813 Possibile fix alla inutile ri-richiesta di configurazione videoteca
(cherry picked from commit 6424e3881c)
2021-04-10 14:22:51 +02:00
github-actions[bot]
2b95903e91 {Aggiornamento domini}
(cherry picked from commit c054389a9d)
2021-04-10 14:22:47 +02:00
mac12m99
0d16c2f4da fix e piccole migliorie trailer
(cherry picked from commit 05424ee827)
2021-04-10 14:22:44 +02:00
Alhaziel01
c08f40dd6f Fix Disattivazione Episodio Successivo 2021-04-10 11:37:26 +02:00
Alhaziel01
01c298feaf Fix Mediaset Play 2021-04-10 09:19:35 +02:00
Alhaziel01
c62cbafa39 Fix Episodio Successivo 2021-04-10 09:19:29 +02:00
mac12m99
a22353e7c9 Nuovo link altadefinizioneClick
(cherry picked from commit ae13866937)
2021-04-08 20:56:11 +02:00
mac12m99
b2bd3e61d1 KoD 1.6.2
- Migliorata funzione cerca trailer\n- Episodio successivo: è ora disponibile la modalità playlist (puoi usare il tasto riproduci successivo di kodi)\n- aggiunto www.accuradio.com\n- migliorie varie\n
2021-04-07 20:16:21 +02:00
mac12m99
8441b1e28a fix wstream 2021-03-31 21:05:29 +02:00
mac12m99
4a19071195 fix dirette mediasetplay 2021-03-31 20:34:32 +02:00
mac12m99
789ab6cc6c possibile fix trakt 2021-03-30 20:50:59 +02:00
Alhaziel01
3f8466d83c Fix VVVVID 2021-03-27 09:23:33 +01:00
Alhaziel01
3e83618baa Fix TMDB, VVVVID, Mediaset Play 2021-03-26 16:05:23 +01:00
mac12m99
883198f853 fix server VUP e vidtome 2021-03-25 21:09:54 +01:00
mac12m99
65ecee0a7f fix vidtome e migliorie vcrypt 2021-03-24 20:25:48 +01:00
mac12m99
953e25b3c0 fix userload e wstream 2021-03-24 20:25:23 +01:00
mac12m99
a79a83ba7f fix bug py3 streamingcommunity 2021-03-23 20:50:01 +01:00
Alhaziel01
bea1fceaa1 Fix Aggiunta in Videoteca StreamingCommunity 2021-03-23 19:25:29 +01:00
Alhaziel01
f222769b94 Fix StreamingCommunity 2021-03-23 17:54:07 +01:00
Alhaziel01
13cb0b915f Fix Animeworl e Casacinema 2021-03-22 10:33:07 +01:00
mac12m99
9f304abbee fix errore mediasetplay nelle sezioni con molti elementi + fix minori 2021-03-20 21:02:46 +01:00
Alhaziel01
d1d840d02d Raiplay Possibilità di mettere in pausa e ricominciare dall'inizio 2021-03-19 20:08:30 +01:00
Alhaziel01
572775cbbd Ordine in canali live 2021-03-19 19:30:47 +01:00
Alhaziel01
82a597aedd - Fix Nome file in Wstream
- Migliorato riconoscimento m3u9 in download
2021-03-19 19:30:36 +01:00
Alhaziel01
5223d6c87b - Fix Piratestreaming
- Normalizzazione numerazione episodi
2021-03-19 19:30:21 +01:00
enricodarodda
a9c0fb02db fix serietvonline 2021-03-19 19:25:54 +01:00
Alhaziel01
0932211da4 Probabile fix ricerca globale 2021-03-19 19:11:18 +01:00
Alhaziel01
d996bd06e5 Fix Autoplay 2021-03-17 15:29:01 +01:00
mac12m99
ed2c0fbbda Aggiornamento domini 2021-03-15 18:18:03 +01:00
mac12m99
6ca8c34692 fix mediasetplay: mostrati tutti gli episodi (prima max 100) 2021-03-15 18:17:24 +01:00
Alhaziel01
c08dc9b37f Fix Videoteca Serie TV 2021-03-13 19:47:50 +01:00
Alhaziel01
3a713dbfcf Fix Community Channels 2021-03-13 15:25:43 +01:00
Enrico Da Rodda
466a37106b Fix Altadefinizione01_link 2021-03-13 15:16:21 +01:00
mac12m99
76c934fe68 ops 2021-03-13 15:16:13 +01:00
Alhaziel01
eb4cfb1426 Fix Videoteca in caso di cambio dominio 2021-03-13 12:50:35 +01:00
Alhaziel01
416be99f91 Possibilità di passare all'episodio successivo in Automatico o Manuale 2021-03-13 12:50:08 +01:00
Alhaziel01
8f02cc064e Fix MediasetPlay 2021-03-13 12:50:05 +01:00
mac12m99
4857ff933e fix crash checkhost appena dopo l'installazione 2021-03-12 23:51:32 +01:00
mac12m99
fa0fe6e534 KoD 1.6.1
-Migliorata l'efficacia del riconoscimento dei contenuti in ricerca film/serie
- corretti alcuni bug e fatti alcuni fix per i soliti cambi di struttura
2021-03-09 22:08:09 +01:00
mac12m99
f1da5c7a0b fix mostra selezione canale in videoteca 2021-03-04 20:49:52 +01:00
enricodarodda
b81096b8d5 fix seriehd (#281) 2021-03-04 20:49:48 +01:00
mac12m99
3042cdf6ea miglioria gestione changelog 2021-03-04 20:49:28 +01:00
mac12m99
20b942a479 verifica directory e test_conn spostati in service 2021-03-04 20:48:26 +01:00
mac12m99
3689f25df9 aggiornamento domini 2021-03-04 20:47:56 +01:00
enricodarodda
5b5cc2f740 Fix seriehd (#280) 2021-02-26 18:11:20 +01:00
mac12m99
31a3eb3e05 fix seriehd (HDpass) 2021-02-25 20:31:22 +01:00
mac12m99
4d5a513e93 Disattivata disabilitazione automatica aggiornamento serie in videoteca + fix serie aggiornate comunque 2021-02-25 20:31:19 +01:00
mac12m99
71da044f48 fix serie flaggate come conluse + typo 2021-02-25 20:31:17 +01:00
mac12m99
09532218f1 aggiornamento domini 2021-02-25 20:30:14 +01:00
mac12m99
3df9077cac fix launcher 2021-02-22 18:26:34 +01:00
mac12m99
0d49a4e8dc fix verifica link e unshortenit (linkup) su kodi 19 2021-02-21 19:20:33 +01:00
Alhaziel01
1dfd873d86 - Fix SerieHD
- Fix VVVVID
- Fix TMDB
- Fix Rinumerazione
- Fix Mediaset (Rimossa Numerazione)
2021-02-20 16:30:45 +01:00
mac12m99
d568701648 fix cb01 2021-02-20 11:37:25 +01:00
Alhaziel01
1d7db6ef42 Fix Discovery 2021-02-19 16:10:46 +01:00
mac12m99
4d1898cf04 fix db 2021-02-17 21:49:37 +01:00
Alhaziel01
0bad0e5169 Fix Discovery 2021-02-17 10:10:14 +01:00
Alhaziel01
64f681fa06 Fix TMDB 2021-02-14 16:38:27 +01:00
Alhaziel01
9fe1146344 - Fix Discovery
- Fix aggiunta alla videoteca se manca il parametro Qualità
2021-02-14 16:38:22 +01:00
mac12m99
748fad7431 KoD 1.6
- rimosso supporto a TVDB (l'accesso alle API diventerà a pagamento)
- aggiunto canale Discovery+
- aggiunta possibilità di scegliere numerazioni alternative per le serie tv
- migliorie interne di vario tipo (tra cui un migliore riconoscimento dei contenuti nel caso siano scritti male)
2021-02-13 16:37:02 +01:00
mac12m99
0ebc744115 fix server nel geniodellostreaming 2021-02-06 18:42:09 +01:00
mac12m99
7d0dcfc487 fix onlystream e userload 2021-02-03 21:57:06 +01:00
mac12m99
45dcd96ab8 cinemalibero: miglior identificazione del tipo contenuto 2021-02-01 19:35:32 +01:00
mac12m99
3962359ea4 disattivato polpotv (down) 2021-01-30 20:10:51 +01:00
Alhaziel01
a15b2fb320 Fix StreamingCommunity 2021-01-30 08:55:11 +01:00
mac12m99
dba2c4b96e nuovo dominio wstream (krask.xyz) 2021-01-29 20:34:34 +01:00
Alhaziel01
3f4dface9a Fix UPtoStream 2021-01-29 20:34:15 +01:00
mac12m99
9beda46d89 fix bug referer nei CC 2021-01-29 20:33:45 +01:00
Alhaziel01
58abe6da50 Fix Community Channels 2021-01-28 12:13:13 +01:00
Alhaziel01
b6307f0329 Fix Titolo Community Channels 2021-01-28 11:28:00 +01:00
Alhaziel01
d8d645fcbb Fix Trakt e servertools 2021-01-27 11:24:24 +01:00
mac12m99
3f680ccd40 alcuni fix raiplay 2021-01-25 17:54:08 +01:00
mac12m99
c443340754 fix streamingcommunity (antiban) 2021-01-24 19:33:24 +01:00
mac12m99
a88acd0c07 KoD 1.5.3
- correzioni di alcuni bug (citiamo ad esempio il crash con il refresh rate e l'impossibilita di entrare nel menu server bloccati)
- fix per cambio di struttura a qualche canale/server
- migliorie interne
2021-01-22 22:41:14 +01:00
Alhaziel01
cbd5860c9a Fix server StreamingCommunity e Icone Server in Ricerca Globale 2021-01-21 15:50:24 +01:00
Alhaziel01
265b5a83cb Fix Freeze Riproduzione da Libreria 2021-01-19 19:57:30 +01:00
Alhaziel01
c8dd638531 - Paramount in adesso in onda
- StreamingCommunity come server
2021-01-16 16:58:03 +01:00
Alhaziel01
3b65319e2c - Fix AnimeWorld
- Nuovo Canale Paramount Network
2021-01-16 16:58:00 +01:00
Alhaziel01
312938d845 Fix AnimePerTutti 2021-01-15 17:47:49 +01:00
Alhaziel01
471e543ac5 - Fix AnimeWorld
- Fix Toonitalia
 - Fix DoodStream
 - Nuovo Server NinjaStream
 - Nuovo Server OkStrem
 - Nuovo Server Userload
2021-01-15 17:19:41 +01:00
Alhaziel01
52d2a1f1a3 Miglioria Info Window 2021-01-14 21:12:28 +01:00
Alhaziel01
a23423df96 Miglioramenti ricerca Mediaset Play e Toon Italia 2021-01-14 21:12:26 +01:00
Alhaziel01
e9940c4a75 Miglioramenti videoteca 2021-01-14 21:12:23 +01:00
Alhaziel01
e3bf1f1cef Fix Support Server e migliorie StreamingCommunity 2021-01-14 21:12:20 +01:00
Alhaziel01
5c88f011e5 Fix Episodi StreamingCommunity 2021-01-14 09:29:37 +01:00
mac12m99
0f9d083263 fix streamingcommunity 2021-01-13 22:28:16 +01:00
mac12m99
d3cc008116 miglioria support.server e fix eurostreaming 2021-01-13 18:24:12 +01:00
Alhaziel01
c8705fc76d Fix Server Bloccati 2021-01-13 17:26:12 +01:00
Alhaziel01
1673239592 Fix Mediaset Play 2021-01-13 12:10:12 +01:00
mac12m99
71663f2e48 Non visualizzare mai i server disattivati 2021-01-12 22:38:17 +01:00
mac12m99
fa829c66b0 disattivato akvideo (mostra captcha), autoplay: nessun messaggio in caso di video eliminato 2021-01-12 21:11:45 +01:00
Alhaziel01
54d7e1ca28 Fix Ricerca Alternativa 2021-01-11 17:28:15 +01:00
Alhaziel01
5ee2d98495 Fix Qualità in Ricerca Globale 2021-01-11 11:49:03 +01:00
marco
377864bbe7 fix override dns 2021-01-10 13:44:05 +01:00
marco
47f02db14b KoD 1.5.2
- Migliorato Menu Rapido
- Rimosso Menu Laterale
- Fix Youtube
- Fix Visualizza collegamenti della videoteca come pop-up
- Riorganizzata sezione Aiuto
- Reinserito canale tantifilm
2021-01-09 20:34:42 +01:00
Alhaziel01
d169179e15 Fix Autoplay con Youtube 2021-01-08 17:46:09 +01:00
marco
92d522f428 disattivato fastsubita (pare chiuso), fix animesaturn e piccola miglioria cb01 2021-01-06 20:50:21 +01:00
Alhaziel01
8e0e9529b4 Fix Fembed e Streamtape 2021-01-06 20:50:03 +01:00
marco
9599512db7 rimossi cloudscraper, simplejson e torrentool, aggiornato sambatools 2021-01-02 16:19:24 +01:00
Alhaziel01
8f0f571554 Fix Mediaset Play 2020-12-29 20:27:21 +01:00
Alhaziel01
b4d988b702 Fix Busy Dialog 2020-12-28 20:00:18 +01:00
Alhaziel01
47f62642d5 Fix Streaming Community e Ricerca Globale 2020-12-28 20:00:02 +01:00
marco
8079ef5b29 ops 2020-12-28 18:40:27 +01:00
marco
0c0cb69001 piccoli fix 2020-12-28 17:30:30 +01:00
marco
d2095340be fix streamtape 2020-12-27 00:07:58 +01:00
marco
21619eb98c fix ilgenio 2020-12-26 18:02:46 +01:00
Alhaziel01
f92b9c391b Fix Autoplay e Segna come già visto 2020-12-24 10:23:19 +01:00
Alhaziel01
216ea4e5d1 Fix AnimeUniverse 2020-12-23 19:51:35 +01:00
Alhaziel01
114a640a13 Fix Anime Universe 2020-12-23 19:34:37 +01:00
Alhaziel01
fc39894b1a - Fix Raiplay
- Fix Animesaturn
 - Fix Animeuniverse
 - Alcuni Fix alla riproduzione
2020-12-23 19:21:28 +01:00
Alhaziel01
bdd4b69af1 Assegna tasto per Menu Rapido senza riavviare 2020-12-22 19:08:05 +01:00
Alhaziel01
c80ab72a75 Fix Grafici InfoPlus per Confluence 2020-12-22 19:00:41 +01:00
Alhaziel01
5c52d14e9b Fix Grafica Rinumerazione per Confluence 2020-12-22 18:45:06 +01:00
Alhaziel01
9b519adcb1 Fix Grafica Nuova Ricerca Globale per Confluence 2020-12-22 18:36:31 +01:00
Alhaziel01
2bc3d237d4 Fix Rinumerazione 2020-12-22 18:14:46 +01:00
Alhaziel01
d932b4d5b6 Fix Toonitalia 2020-12-22 18:03:59 +01:00
Alhaziel01
f929b63eb7 Aggiungi ai Preferiti anche da cerca Film/SerieTV 2020-12-22 18:03:54 +01:00
Alhaziel01
d70d019223 Fix Autorenumber 2020-12-22 18:03:50 +01:00
Alhaziel01
35d7480c1f Fix DooD Stream 2020-12-22 12:10:19 +01:00
Alhaziel01
1f407ade58 Fix Mystream per HDPass 2020-12-22 10:27:55 +01:00
Alhaziel01
aa5a92f07e Fix Vcrypt (snip) 2020-12-22 09:26:25 +01:00
marco
6dc2bb3f59 fix vcrypt 2020-12-21 21:52:21 +01:00
marco
3ef31e8df3 fix registrazione HDmario 2020-12-21 21:52:18 +01:00
Alhaziel01
3b64bbed88 - Fix Ricerca Globale in caso di Torrent
- Fix Wstream
 - Fix SupervVideo
 - Fix TVDB
 - IP diretto per alcuni siti
 - Altri Fix per Kodi 19
2020-12-21 17:42:59 +01:00
Alhaziel01
31de200d3d Fix per Skin che non supportano il Font di Default 2020-12-19 18:48:45 +01:00
Alhaziel01
2b8c3f4d44 - Fix per dispositivi lenti
- Aggiungi ai preferiti in ricerca globale
- Fix server in finestra Pop-Up
2020-12-19 15:22:24 +01:00
Alhaziel01
82fb4e75e6 Revert "Text fix dispositivi lenti"
This reverts commit 6fd04b1a73.
2020-12-19 09:52:35 +01:00
Alhaziel01
6fd04b1a73 Text fix dispositivi lenti 2020-12-19 09:46:58 +01:00
Alhaziel01
e09fb5e50c Fix Grafico Ricerca Globale 2020-12-18 19:55:36 +01:00
Alhaziel01
d72908d5ad fix unshorten mysnip 2020-12-18 17:52:53 +01:00
Alhaziel01
960db3e3aa Migliorie al Menu Rapido 2020-12-18 17:52:48 +01:00
Alhaziel01
2d9176af26 Fix ricerca globale e rinumerazione 2020-12-18 12:38:51 +01:00
marco
de5a65d77a KoD 1.5.1
- corretta e migliorata la nuova ricerca globale
- salvataggio punto di visione basato sull'id tmdb (disponibile su qualunque canale / server anche senza salvare in videoteca)
- alcuni fix e migliorie
2020-12-17 18:41:54 +01:00
marco
23e61f23c6 ops 2020-12-10 22:53:20 +01:00
marco
1c623ad47a aggiornamento domini (con fix ilcorsaronero) 2020-12-10 22:50:32 +01:00
marco
c0aabf92ff Nuovo metodo anti CF -> fix ilgenio 2020-12-10 22:47:33 +01:00
marco
27f23a03ce fix torrent in caso di disabilitazione elementum 2020-12-10 22:47:22 +01:00
marco
03e9a2281c fix altadefinizioneclick 2020-12-07 14:29:47 +01:00
marco
bdac2659e5 fix dirette setResolvedUrl 2020-12-05 16:36:46 +01:00
marco
ba7d44cd71 piccole migliorie ricerca globale 2020-12-03 17:50:50 +01:00
Alhaziel01
2469c37dbd Ricerca Globale: Fix e Migliorie 2020-12-02 16:51:26 +01:00
Alhaziel01
2e8d8ae1b6 Ricerca Globale:
- Plot auto scroll
 - Fix Lista Film
2020-12-02 10:07:44 +01:00
marco
94eac40955 KoD 1.5
- Nuova Ricerca Globale
- Nuova Rinumerazione
- Messaggi di Errore più chiari
- Fix var
2020-12-01 20:19:07 +01:00
Alhaziel01
97e06a8b84 Fix Configurazione Videoteca 2020-11-25 10:37:05 +01:00
marco
a28ef8d134 piccolo fix tmdb 2020-11-19 19:51:44 +01:00
Alhaziel01
133f220674 Fix Streamtape 2020-11-19 19:31:53 +01:00
marco
7c323c309b fix ricerca altadefinizioneclick e registrazione hdmario 2020-11-19 19:22:27 +01:00
Alhaziel01
b80e1dafc6 Fix VVVVID 2020-11-18 11:18:50 +01:00
Alhaziel01
36b3ebd3dc Fix Aggiunta alla Videoteca 2020-11-14 19:26:10 +01:00
marco
1ede6b1508 fix seriehd 2020-11-13 21:08:30 +01:00
marco
9595f0ec05 aggiornamento domini 2020-11-12 18:18:30 +01:00
Alhaziel01
0a51a1b598 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2020-11-09 11:17:46 +01:00
Alhaziel01
0ac336da2b Piccola correzione 2020-11-09 11:17:32 +01:00
marco
f0849af647 fix ricerca altadefinizioneclick e altre piccole modifiche 2020-11-08 16:06:36 +01:00
marco
7c4f18da4b workaround corruzione settings.xml 2020-11-08 11:39:29 +01:00
Alhaziel01
e46849f038 Fix Installazione Elementum 2020-11-07 18:56:49 +01:00
Alhaziel01
5d0331a59a Fix streamZ 2020-11-07 18:49:59 +01:00
Alhaziel01
08762819b7 Fix Streaming Community 2020-11-07 17:15:36 +01:00
marco
356a2c0c5c aggiornamento domini canali 2020-11-07 14:56:31 +01:00
mac12m99
94e0a2ca99 fix ricerca ilgenio, aggiunto cerca trailer dove mancava e altri piccole modifiche 2020-11-01 14:19:59 +01:00
marco
ec916a073b fix apri nel browser su alcuni canali 2020-10-24 13:18:34 +02:00
marco
384f06ded9 KoD 1.4.1
- ridisegnata la finestra della scelta film/serietv quando si aggiunge in videoteca\n- modifiche minori, qualche fix ai canali/server ed alla ricerca alternativa\n
2020-10-19 18:33:08 +02:00
marco
e0802264f3 uso 1.0.0.1 invece di 1.1.1.1 2020-10-18 21:00:05 +02:00
marco
284398f2a6 fix streamz e supporto a nuovi url shortener 2020-10-17 16:08:11 +02:00
marco
95b5fde20c image.tmdb.org sotto https 2020-10-14 22:47:56 +02:00
marco
81c03436bb ilcorsaronero: fix locandine pagina successiva 2020-10-12 18:11:24 +02:00
marco
a349ec70ab fix serietvonline findhost 2020-10-08 20:44:13 +02:00
Alhaziel01
a862366b02 - Fix MondoSerieTV
- Aggiornamento url canali
2020-10-08 16:49:14 +02:00
Alhaziel01
9361713125 Fix Streaming Community 2020-10-08 16:23:12 +02:00
Alhaziel01
54c35b6d1d Fix Up Stream 2020-10-08 11:53:53 +02:00
marco
3f8af5efc8 HDmario: fix registrazione automatica (supporto a captcha e verifica mail) 2020-10-07 20:54:03 +02:00
Alhaziel01
ca6c75f711 - Fix Community Channels
- Fix Dreamsub
 -Fix CB01 Anime
2020-10-05 17:29:31 +02:00
Alhaziel01
3f45691aa1 Fix Community Channels 2020-10-03 12:41:20 +02:00
Alhaziel01
91961ac84e Piccole Migliorie 2020-10-03 12:41:15 +02:00
Alhaziel01
92d3559917 Fix Trailers da Youtube 2020-10-03 11:04:26 +02:00
Alhaziel01
5fc6d55e8d Fix "Cerca nuovi episodi" da libreria di Kodi 2020-10-03 11:04:21 +02:00
Alhaziel01
e6d0761ce2 Fix tmdb ricerca serie per anno 2020-10-02 19:56:10 +02:00
Alhaziel01
a19483e2c0 Fix Guardaserie icu 2020-10-02 19:56:08 +02:00
marco
17310615ed guardaserieicu è ora .us 2020-10-02 19:56:06 +02:00
Alhaziel01
56203f54fe - Aggiunto DooD Stream
- Fix Vudeo
2020-10-02 19:56:04 +02:00
Alhaziel01
000db7e169 Fix VupPlayer 2020-10-02 19:56:01 +02:00
Alhaziel01
5e9312a0a7 Fix Link Diretti Animesaturn 2020-10-02 19:55:59 +02:00
Alhaziel01
c94e8b96ea Fix Autorenumber (in caso di errori) 2020-10-02 19:55:57 +02:00
marco
71a75044d8 piccoli fix, rimosso guardaserieicu(chiuso) 2020-10-02 19:55:54 +02:00
Alhaziel01
df1041403d Fix Backup Videoteca 2020-10-01 20:12:31 +02:00
Alhaziel01
92bc6e2ddf Fix e migliorie 2020-10-01 20:10:52 +02:00
Alhaziel01
8a53a37cc0 Fix dirette paramount 2020-10-01 20:08:50 +02:00
Alhaziel01
a6f45b40c5 Fanart in InfoPlus 2020-09-30 11:56:47 +02:00
Alhaziel01
ff194d6e7a Fix Community Channels 2020-09-30 11:56:41 +02:00
Alhaziel01
01229b08f1 Fix Ricerca Toonitalia 2020-09-30 11:56:31 +02:00
marco
893f7b1f51 reinserita sezione "canali live" in adesso in onda 2020-09-29 22:20:06 +02:00
marco
8a8d1e4f5e KoD 1.4
- completato il supporto al futuro Kodi 19\n- ridisegnato infoplus\n- fix vari ed eventuali\n
2020-09-29 21:08:25 +02:00
marco
d153ac5918 ripristinata vecchia sezione "adesso in onda" 2020-09-22 18:16:56 +02:00
mac12m99
c1d98f2136 ops 2020-09-17 20:39:35 +02:00
Alhaziel01
c71d779b82 Fix Wstream 2020-09-17 20:36:44 +02:00
Alhaziel01
e94a601bb2 Disabilitato Netfreex 2020-09-17 20:35:44 +02:00
Alhaziel01
25881db4dc Fix Wstream 2020-09-15 15:43:12 +02:00
Alhaziel01
da94233a7e Fix Akvideo Regex 2020-09-14 11:29:50 +02:00
marco
b16677e521 animeunity fix categorie 2020-09-13 21:11:57 +02:00
marco
d59034f6c9 migliorato unshortenit e fix akvideo 2020-09-13 18:11:04 +02:00
Alhaziel01
13caff32b9 Fix Locandine in Videoteca 2020-09-12 19:19:31 +02:00
marco
8fcabc6ef7 fix nored.icu e netfreex 2020-09-11 17:30:16 +02:00
marco
5afd354267 vcrypt: fix wstream 2020-09-11 17:29:42 +02:00
marco
f07aae695f ops 2020-08-30 16:21:21 +02:00
marco
d786332b6f fix altadefinizione01 e filmpertutti 2020-08-30 15:08:01 +02:00
marco
90bf7088cb ops 2020-08-28 18:41:38 +02:00
marco
bd383eb0f9 fix autoplay 2020-08-28 18:39:19 +02:00
marco
176b04c64b potenziato match tmdb e alcuni fix per la ricerca globale 2020-08-28 18:38:51 +02:00
marco
352d3f5b6c fix support.menu con ricerca globale 2020-08-28 18:37:41 +02:00
marco
10690f1f2b aggiornamento domini 2020-08-19 19:48:13 +02:00
marco
5c2c3776e2 ricerca globale più precisa 2020-08-19 17:23:42 +02:00
Alhaziel01
04f13fbe68 Fix AU 2020-08-14 16:27:07 +02:00
Alhaziel01
8160f64d86 Migliorie AW 2020-08-14 12:28:21 +02:00
Alhaziel01
51bd2af725 Fix Toonitalia 2020-08-14 11:34:36 +02:00
Alhaziel01
ac4ebd1abc Rinumerazione possibilità di cancellarla 2020-08-14 11:34:25 +02:00
marco
15a2771da6 casacinema cambio dominio, popcornstream eliminato (chiuso) e fix altadefinizione01 2020-08-14 10:19:21 +02:00
marco
d8b029d2a8 fix parziale tantifilm e fix guardaserie cam/icu in libreria 2020-08-13 20:14:43 +02:00
Alhaziel01
f5c2e9c132 Fix AnimeUniverse 2020-08-13 20:07:31 +02:00
Alhaziel01
3fae176f60 Migliorie AW 2020-08-13 18:53:50 +02:00
Alhaziel01
70020ffc9a mini fix youtube 2020-08-13 18:53:45 +02:00
Alhaziel01
9a6fce9bf5 fix Streaming Community 2020-08-13 08:49:54 +02:00
Alhaziel01
fe17666aa4 Fix 2020-08-12 19:54:57 +02:00
Alhaziel01
58397ac1fb Fix Live La 7 2020-08-12 19:41:44 +02:00
Alhaziel01
257b523ac6 Icone Live La 7 2020-08-12 18:52:09 +02:00
Alhaziel01
ec133f7778 - Fix Aggiunta alla libreria Mediaset Play
- Icone canali Live
2020-08-12 18:45:10 +02:00
Alhaziel01
8536c53bf2 Nome server Directo nelle traduzioni 2020-08-12 18:43:20 +02:00
Alhaziel01
4d8734416b Piccolo Fix Support 2020-08-12 18:43:04 +02:00
Alhaziel01
f58135a950 Fix Wstream 2020-08-10 09:43:12 +02:00
marco
f04aa71d31 KoD 1.3.1
- aggiunti nuovi canali: film4k, animealtadefinizione, streamingcommunity, animeuniverse , guardaserieICU
- HDmario ora supporta l'utilizzo di account
- Miglioramenti sezione news, è ora possibile raggruppare per canale o per contenuto, e settare l'ordinamento
- risolto il fastidioso problema per cui poteva capitare che la ricerca ripartisse dopo un refresh di kodi (tipicamente quando l'aggiornamento della videoteca finiva)
- alcuni fix ai canali
2020-08-06 19:56:57 +02:00
Alhaziel01
5af023ad21 Fix AW e AS 2020-08-03 10:48:58 +02:00
marco
e8284ca564 fix ricerca IGDS 2020-07-29 20:11:16 +02:00
Alhaziel01
7971f2bbb1 Fix Netfreex 2020-07-29 20:11:12 +02:00
Alhaziel01
6eaf28b9f3 Fix AW 2020-07-29 12:09:29 +02:00
Alhaziel01
b3b4df3c69 Fix Ricerca Metalvideo 2020-07-27 09:17:47 +02:00
Alhaziel01
2003e97192 Fix nomi corretti nei server 2020-07-25 17:24:07 +02:00
Alhaziel01
2c6d9671be Rimosso Server AnimeWorld 2020-07-25 17:24:00 +02:00
Alhaziel01
4b66e459f9 Fix AnimeWorld e AnimeSaturn 2020-07-25 17:23:51 +02:00
Alhaziel01
d20e289ece Fix Download 2020-07-24 16:12:09 +02:00
Alhaziel01
9b19173007 Fix Mediaset Play 2020-07-23 10:14:14 +02:00
Alhaziel01
2702a68998 Traduzioni "I Miei Link" 2020-07-21 20:15:37 +02:00
Alhaziel01
2e794b1c6f Fix AnimeWorld 2020-07-21 20:15:33 +02:00
Alhaziel01
7b7f3490df Fix Streamtape e Fembed 2020-07-21 20:15:29 +02:00
Alhaziel01
13f5f1d4ac Fix Tipologia Add-on 2020-07-21 14:42:14 +02:00
Alhaziel01
8851bfdb31 Fix Wstream 2020-07-20 17:00:42 +02:00
Alhaziel01
acf3f01d3d Fix Canali inclusi nella ricerca da impostazioni 2020-07-20 11:33:23 +02:00
Alhaziel01
3f8f1e2966 Fix Ricerca Trakt 2020-07-20 11:33:18 +02:00
Alhaziel01
11205c8e9e Fix Mediaset Play 2020-07-20 10:05:48 +02:00
marco
0e1eb946b2 KoD 1.3
- Aggiunti i canali Mediaset Play e La 7.
- Riscritto Animeunity.
- Le stagioni concluse vengono ora escluse dall'aggiornamento della videoteca.
- Ora è possibile aggiornare gli episodi di Kod dal menu contestuale della Libreria di Kod (se non gestite da Kod verranno cercate)
- Fix Adesso in Onda su ATV
- Fix Vari
2020-07-19 16:05:27 +02:00
marco
c93ba1b736 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2020-07-12 22:58:37 +02:00
marco
3efaefd022 fix per ricerca globale 2020-07-12 22:58:26 +02:00
Alhaziel01
b7df7ebc12 VVVVID visibili tutti i contenuti (es. Jojo) 2020-07-11 11:48:16 +02:00
Alhaziel01
574be5cea2 Fix AnimeSaturn 2020-07-10 18:41:03 +02:00
marco
e8b13edc05 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2020-07-10 17:52:02 +02:00
marco
ceb8800bc5 fix vcrypt 2020-07-10 17:51:35 +02:00
Alhaziel01
62b2fbc659 Fix Canali inclusi nella ricerca 2020-07-10 09:55:03 +02:00
Alhaziel01
5da1ab4cda Fix Dreamsub 2020-07-09 16:42:33 +02:00
Alhaziel01
e6d9c42e9f Fix HD4ME 2020-07-09 16:42:29 +02:00
Alhaziel01
05c8e43e95 Fix CloudVideo 2020-07-09 16:42:24 +02:00
marco
a4f9e5229f fix autoplay, priorità alla qualità 2020-07-06 21:08:09 +02:00
marco
2b6c8b0d9a fix CF 2020-07-06 15:10:35 +02:00
marco
4f219e0e5e svariati fix ai canali 2020-07-04 15:10:27 +02:00
marco
9ac59de81b fix ilcorsaronero 2020-07-04 11:49:32 +02:00
marco
0932a1969f KoD 1.2
- aggiunto nuovo canale: guardaserie.cam
- autoplay migliorato, ora i settaggi sono globali e non più per canale
- adesso in onda riscritto, ora usa EPG (ringraziamo epg-guide.com)
- Riprendi la visione di un film o episodio da server diversi (solo videoteca)
- fix e ottimizzazioni varie
2020-07-03 23:08:20 +02:00
Alhaziel01
8b78c88e41 Fix Adesso in Onda 2020-06-23 18:32:31 +02:00
Alhaziel01
4d29cd295d Fix Sposta / Backup Videoteca 2020-06-23 17:22:53 +02:00
Alhaziel01
191d419fd1 Fix Wstream 2020-06-22 09:25:17 +02:00
Alhaziel01
d56c7caa80 Fix Keymap 2020-06-18 20:22:40 +02:00
Alhaziel01
8a8e151a27 Fix Community Channel 2020-06-18 19:07:50 +02:00
marco
6e7e3e1589 fix aggiungi in videoteca dalla ricerca globale 2020-06-14 14:46:21 +02:00
Alhaziel01
d37f55b69d Fix Community Channel 2020-06-12 17:56:44 +02:00
marco
f1a5fbb818 fix dailymotion by alfa, piccole modifiche 2020-06-12 17:56:01 +02:00
marco
bdb6422218 carrellata di piccoli fix per molti canali
casacinema e tantifilm ora funzionano di nuovo
apri nel browser ora appare anche nei menu dei canali
2020-06-06 13:30:43 +02:00
marco
9a3556da34 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2020-06-05 21:38:27 +02:00
marco
1dc9ef598c fix hdpass per cambio di struttura 2020-06-05 21:38:05 +02:00
Alhaziel01
90c44bb8d1 Fix Episodi multipli (eurostreaming, italiaserie) 2020-06-05 12:25:47 +02:00
marco
f4a60081c3 wstream: aggiunto support ai link presenti sul geniodellostreaming 2020-06-04 22:44:38 +02:00
Alhaziel01
a212494bbf Fix Aggiunta alla videoteca 2020-06-03 20:10:18 +02:00
Alhaziel01
a277ff8f00 Fix Lista Server 2020-06-03 20:00:58 +02:00
Alhaziel01
1cf2bf7657 Fix Community Channel 2020-06-03 19:55:28 +02:00
marco
d62113f9d2 KoD 1.1
- Ottimizzata e migliorata la ricerca globale
- fix Mega
- altri cambiamenti minori
2020-05-31 11:46:47 +02:00
marco
d26a2def96 fix pagina successiva discovery list tmdb 2020-05-27 19:48:11 +02:00
marco
c54eb40158 fix hdmario con supporto autenticazione mail 2020-05-26 20:40:34 +02:00
marco
45ac1df0b2 migliorato hdmario 2020-05-24 19:48:20 +02:00
marco
25ec5d2707 fix cb01 serie tv con un link unico 2020-05-23 20:47:01 +02:00
Alhaziel01
859d570f54 alcune Traduzioni 2020-05-23 19:37:53 +02:00
Alhaziel01
dce1ca6bc2 Altro Fix Per Titoli 2020-05-23 19:37:49 +02:00
Alhaziel01
b3f32021a1 Fix Nome serie 2020-05-22 22:39:45 +02:00
Alhaziel01
32b0f766f6 Altri Fix 2020-05-22 22:16:58 +02:00
Alhaziel01
cfa4981bab Fix Netfreex 2020-05-22 22:16:52 +02:00
marco
b70d00e3ba fix vidmoly su altadefinizioneclick 2020-05-22 23:09:55 +02:00
marco
96918d58ca KoD 1.0.1
- Fix Trakt e aggiunti i segni di spunta (già visto) nei canali
- Fix Segna come già visto
- Aggiunta Disattivazione aggiornamento serie tramite multiselect
- Altri fix e migliorie
2020-05-22 19:27:16 +02:00
Alhaziel01
b73e3156fb Fix CB01 2020-05-21 18:59:22 +02:00
Alhaziel01
4cd7536c2d Fix Mega 2020-05-21 16:15:41 +02:00
Alhaziel01
2ad0db7b4c Fix CB01 2020-05-21 12:30:43 +02:00
marco
26bd9916ec ops 2020-05-20 08:27:06 +02:00
axlt2002
f347379b99 Aggiunto logger.info() per debug 2020-05-18 19:44:29 +02:00
Alhaziel01
c46146ade5 Fix Segna come Visto 2020-05-18 19:43:37 +02:00
Alhaziel01
8c324bdbb4 Fix Trakt 2020-05-18 19:43:31 +02:00
Alhaziel01
678398c8ec Fix Wstream 2020-05-18 19:43:21 +02:00
mac12m99
39bcb12922 aggiunti server anavids, streamtape, vidmoly
fix animeworld
2020-05-18 14:29:48 +02:00
Alhaziel
1c7c2383ca Fix Segna come Visto con Autoplay 2020-05-16 20:36:08 +02:00
Alhaziel01
a9dc325216 Fix Segna come visto 2020-05-16 20:35:07 +02:00
Alhaziel01
6f62778a6f Migliorie a Menu Rapido 2020-05-15 19:22:48 +02:00
Alhaziel01
ae5cd340c7 Fix Elementum download per Android TV 2020-05-15 19:22:47 +02:00
marco
a8d5fad1a8 KoD 1.0
- completato il supporto ai torrent e aggiunto ilcorsaronero.xyz
- aggiunto supporto agli episodi locali, ovvero poter inserire nella libreria di kodi un misto tra puntate di kod e file scaricati altrove
- le viste ora si salvano di nuovo dal menu laterale, ma rimangono salvate per il tipo di contenuto visualizzato e non per il singolo menu
- ripensato il menu rapido, che ora è più rapido, ridisegnate alcune finestre
2020-05-14 21:26:48 +02:00
marco
cdf84d5573 migliorie piratestreaming e tantifilm 2020-05-01 23:54:43 +02:00
Alhaziel01
e4a482e51d Fix Community Channels 2020-05-01 22:21:44 +02:00
marco
a9b4349839 filmpertutti: corretto scraper puntate (no 1x00)
tantifilm: corretta aggiunta in videoteca dalla ricerca
2020-04-30 22:54:47 +02:00
marco
fcb3bc3a5a fix cb01 2020-04-30 15:52:35 +02:00
marco
7d32bde60d aggiornato cloudscraper (fix piratestreaming) 2020-04-29 11:36:46 +02:00
Alhaziel01
34b72249e7 Fix Filtri Community Channels 2020-04-27 23:09:21 +02:00
Alhaziel01
4e2bd6bde5 Fix CB01 Anime 2020-04-27 18:03:12 +02:00
marco
68eea5ec87 fix server cb01 2020-04-27 18:03:01 +02:00
marco
61b67a4781 miglioria findhost (fix cineblog) 2020-04-25 19:50:08 +02:00
Alhaziel01
03ff4bc0be Aggiornato Cloudscraper:
(Fix per Il Genio dello Streaming)
2020-04-25 15:06:59 +02:00
Alhaziel01
04d86640d6 Fix Serietvonline 2020-04-25 15:06:21 +02:00
Alhaziel01
bccf004548 Fix Ultima Ricerca 2020-04-24 20:36:24 +02:00
marco
69b097d6e5 ops 2020-04-23 21:03:25 +02:00
Alhaziel01
8e0c062f21 Fix cb01 2020-04-23 16:12:30 +02:00
marco
bcf05c0ab7 fix cinemalibero serietv 2020-04-23 16:12:24 +02:00
Alhaziel01
f0c97c699a Fix Wstream 2020-04-22 22:43:25 +02:00
Alhaziel01
83dacb78e7 Fix Wstream 2020-04-22 20:51:53 +02:00
Alhaziel01
71cfd2aa82 Fix Wstream 2020-04-22 19:50:31 +02:00
Alhaziel01
7c0e2c6391 Fix Animeworld 2020-04-22 19:29:51 +02:00
Alhaziel01
7efd7b4c97 Fix thumbnail 2020-04-22 19:29:49 +02:00
Alhaziel01
466d3c88d1 Rinumerazione all'aggiunta in videoteca 2020-04-22 19:29:44 +02:00
marco
c1682bc505 eliminato download da menu contestuale se contenuto già scaricato 2020-04-22 19:29:33 +02:00
marco
ca0b5a8c56 KoD 0.9.1
- fix vari ed eventuali
2020-04-20 23:49:45 +02:00
marco
dc3d2d4d2a viste di default come prima 2020-04-18 22:54:28 +02:00
marco
7f86424ec1 netfreex cambio url 2020-04-17 23:45:26 +02:00
marco
f26628222e fix ilgenio, link sotto vcrypt&company 2020-04-17 21:30:09 +02:00
Alhaziel01
4d4a1451a0 Fix Wstream 2020-04-17 18:38:41 +02:00
Alhaziel01
e1eb050e23 Community Channels:
- disable_pagination
 - ordered Json
2020-04-17 18:38:05 +02:00
marco
7e6e9fa213 Merge branch 'stable' of github.com:kodiondemand/addon into stable 2020-04-16 23:14:18 +02:00
marco
cfddab97df fix seriehd (sempre 1x01) 2020-04-16 23:13:59 +02:00
Alhaziel01
4789899f99 Adesso in onda Vista Film 2020-04-16 15:33:55 +02:00
Alhaziel01
fec9ba7513 Supporto Skin:
- Phenomenal
 - Black Glass Nova
2020-04-16 15:33:55 +02:00
Alhaziel01
96314849d0 Fix e Pulizia CB01 2020-04-16 15:32:33 +02:00
Alhaziel01
1c751090f8 Fix viste, Serie TV, Stagione, Episodio 2020-04-16 09:36:15 +02:00
marco
6de0f4fec4 KoD 0.9
- Nuova sezione Musica
- Download in Background
- Download dalla Videoteca e supporto file locali
- Backup e cancellazione della Videoteca
- Spostamento della Videoteca
- Migliorata integrazione con libreria di Kodi
- Gestione delle Viste Preferite
- Nuovo layout impostazioni
2020-04-15 22:58:06 +02:00
Alhaziel01
ecafd1b0df Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2020-04-15 15:39:09 +02:00
Alhaziel01
1e713ba94d Fix Community Channels 2020-04-15 15:32:47 +02:00
marco
b6784a595d cb01 sotto cloudflare 2020-04-15 15:18:46 +02:00
marco
6caba23bfb fix wstream 2020-04-14 20:16:47 +02:00
Alhaziel01
0b0f01515d Fix Filmpertutti 2020-04-14 20:14:30 +02:00
Alhaziel01
b562b1cec0 Fix stayonline.pro (in caso di http) 2020-04-14 20:14:24 +02:00
Alhaziel01
d3a9848004 Fix per stayonline 2020-04-14 20:14:17 +02:00
marco
7479a32e51 fix wstream per cambio di struttura 2020-04-12 17:34:21 +02:00
Alhaziel01
cf8eed5d45 Fix Fembed 2020-04-11 22:15:08 +02:00
Alhaziel01
22ef236e2c Fix Serie HD 2020-04-11 21:36:38 +02:00
Alhaziel01
949a4cd711 Velocizzato VVVVID 2020-04-11 21:34:00 +02:00
Alhaziel01
b39973550e Fix Altadefinizione Click 2020-04-11 16:16:54 +02:00
Alhaziel01
d191dcf21d Fix Fembed 2020-04-11 16:16:33 +02:00
Alhaziel01
98ecf30d00 Fix clipwatching 2020-04-11 16:13:06 +02:00
Alhaziel01
ec3fb47141 Fix Tantifilm 2020-04-11 16:12:56 +02:00
Alhaziel01
99a0298f2c Fix Icone 2020-04-09 20:13:40 +02:00
Alhaziel01
28ea1f3a11 Fix Wstream 2020-04-09 10:27:16 +02:00
marco
751718fd02 Merge branch 'stable' of github.com:kodiondemand/addon into stable 2020-04-08 14:50:41 +02:00
marco
aa5d37551b fix vari 2020-04-08 14:47:57 +02:00
Alhaziel01
0009ef6c09 Fix Autorenumber e Vcrypt 2020-04-05 15:32:38 +02:00
Alhaziel01
0a15882059 Fix AnimeSaturn 2020-04-05 15:32:04 +02:00
Alhaziel01
e7c969d86d Fix Wstream 2020-04-05 11:29:24 +02:00
Alhaziel01
fc90834540 Fix Toonitalia 2020-04-05 11:29:18 +02:00
Alhaziel01
55c6ac7c8f Aggiornato cloudscraper 2020-04-05 11:29:13 +02:00
Alhaziel01
34be96127b Fix vcrypt 2020-04-05 11:29:06 +02:00
marco
073755251a disabilitato streamtime 2020-04-04 20:09:43 +02:00
Alhaziel01
e0c106f76b Fix Mega 2020-04-04 17:51:48 +02:00
marco
d5f11197a1 Merge branch 'stable' of github.com:kodiondemand/addon into stable 2020-04-02 21:47:08 +02:00
Alhaziel01
3f6459b746 Fix Go Unlimited e UPstream 2020-04-02 21:45:06 +02:00
Alhaziel
256c76ca7b Fix Inserisci URL 2020-03-27 21:28:08 +01:00
Alhaziel01
b950530a41 Fix Raiplay 2020-03-27 15:24:49 +01:00
Alhaziel01
d0a0f0d370 Fix AltadefinizioneClick 2020-03-27 15:24:45 +01:00
Alhaziel01
f2de8c1431 Fix AltadefinizioneClick 2020-03-26 22:30:11 +01:00
MaxE
45639bbbe0 Fix AkVideo 2020-03-26 19:37:12 +01:00
Alhaziel01
cbda250820 Raiplay:
- Nuova Sezione Teen
 - Nuove Icone
2020-03-26 17:19:32 +01:00
Alhaziel01
9a21e412fb RaiPlay:
- Nuova Sezione Learning
 - Ricerca per Sezione
 - Vari Fix
2020-03-26 16:12:57 +01:00
Alhaziel01
59664b1243 ops! 2020-03-26 12:40:31 +01:00
Alhaziel01
ef53e5c01f Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2020-03-26 12:29:01 +01:00
Alhaziel01
7b788c040e Fix divisione per stagioni Community Channels 2020-03-26 12:28:39 +01:00
marco
1f9878989e ops 2020-03-25 23:16:55 +01:00
Alhaziel
df99375dbd Fix Serie CB01 2020-03-25 22:14:18 +01:00
marco
dffd7d52e9 fix seriehd, guardaserie e aggiornati link canali 2020-03-25 20:33:50 +01:00
marco
3199fc568e fix tantifilm e server correlati, migliorato unshortenit 2020-03-25 20:31:58 +01:00
Alhaziel01
5ba0ca9087 Fix Community 2020-03-24 21:32:42 +01:00
Alhaziel01
85ed91646d Fix Community 2020-03-24 10:58:21 +01:00
Alhaziel01
8efa9aa373 Fix Aggiunta alla libreria per Community Channels 2020-03-24 10:31:08 +01:00
Alhaziel01
69e43695f8 Community Channels, Fix e Migliorie 2020-03-23 23:22:35 +01:00
Alhaziel
a867c6f46a FIx Serie CB01 2020-03-23 20:01:16 +01:00
marco
3ee9f693cf migliorie updater 2020-03-20 23:06:10 +01:00
Alhaziel
791d613489 Fix Wstream 2020-03-20 19:44:44 +01:00
Alhaziel
e1e8e6354e Community:
- Fix Titolo e icone Ricerca personalizzate
 - tvshows_list con link singolo
2020-03-20 19:11:26 +01:00
Alhaziel
d2b30dfab2 Fix Filtri Community Channels 2020-03-18 11:56:29 +01:00
Alhaziel
a0ff50b784 Fix Filtri Community Channels 2020-03-17 19:07:01 +01:00
Alhaziel
52b1b3dd17 Fix AKVideo 2020-03-17 11:25:46 +01:00
Alhaziel
a0eb342883 Fix Community 2020-03-16 15:22:09 +01:00
Alhaziel
a03f886016 Numerazione Standard 2020-03-16 12:15:56 +01:00
Alhaziel
fb9851954a Fix Cinemalibero 2020-03-16 12:15:55 +01:00
Alhaziel
40635e2eda fix il Genio dello Streaming 2020-03-16 12:15:54 +01:00
Alhaziel
ba94d6a3f5 Fix Community Channels 2020-03-16 12:15:54 +01:00
Alhaziel
892e8f6c6b Fix Qualità di Download 2020-03-16 12:15:53 +01:00
mac12m99
283b33f7d4 supporto link embed akvideo e wstream, miglioria vcrypt 2020-03-15 11:09:00 +01:00
marco
6e89e086eb niente captcha per akvideo 2020-03-15 10:30:15 +01:00
Alhaziel
91e7787591 fix librerie 2020-03-14 19:59:31 +01:00
marco
3cdedad7e8 KoD 0.8.1
- riorganizzate le impostazioni
- aggiunte descrizioni tag qualità su cb01 (presto anche sugli altri)
- aggiunto il supporto alle serie di polpotv
- fixato server mystream
- fix Rinumerazione per episodi Nuovi
2020-03-14 17:03:57 +01:00
marco
c642ddc358 fix nextep SMB 2020-03-11 18:15:56 +01:00
Alhaziel
3a8949f3b0 Fix Cb01 e Pufimovies 2020-03-11 17:38:17 +01:00
Alhaziel
f437da86ad Fix Community 2020-03-11 16:44:39 +01:00
Alhaziel
640814a503 Fix Community Channels 2020-03-11 12:33:51 +01:00
marco
c4912c946b Merge branch 'stable' of github.com:kodiondemand/addon into stable 2020-03-08 14:40:52 +01:00
marco
e8e1a7bab8 fix aggiornamenti serie/anime eurostreaming 2020-03-08 14:40:32 +01:00
Alhaziel01
2b2691d1c7 Fix Wstream 2020-03-08 13:48:27 +01:00
Alhaziel
c584e16256 Fix Wstream 2020-03-07 19:41:32 +01:00
Alhaziel
16631f6534 Fix Scarica Stagione 2020-03-07 19:06:50 +01:00
Alhaziel
75cc6b5ee2 Fix Wstream 2020-03-05 19:21:36 +01:00
Alhaziel
f6d0e3398a Fix Wstream 2020-03-04 20:32:51 +01:00
Alhaziel
c6bad05e14 nuovo link serieHD 2020-03-04 20:20:11 +01:00
Alhaziel
657236953c Fix Altadefinizione click 2020-03-04 20:20:04 +01:00
Alhaziel
2935a9be4c Fix findlinks in caso di lista 2020-03-04 20:19:55 +01:00
marco
5fbad3b614 fix wstream 2020-03-03 23:03:59 +01:00
Alhaziel
6038fa2a14 Fix Ultima Ricerca 2020-03-03 23:03:46 +01:00
Alhaziel
5c1e22670f Fix Generi Popcorn Streaming 2020-03-02 21:01:22 +01:00
Alhaziel
79dea32032 Aggiornato link Piratestreaming 2020-03-02 21:01:04 +01:00
Alhaziel
4ec0157930 Fix Altadefinizione01 L 2020-03-02 21:00:43 +01:00
marco
f19dcc1eb5 fix wstream per gestire i link falsi... 2020-02-27 22:49:45 +01:00
marco
4045a46a74 workaround per osx 2020-02-27 19:51:45 +01:00
marco
46ddc61b74 fix updater 2020-02-27 19:49:21 +01:00
marco
2b2eb98c79 rimesso "controlla aggiornamenti" 2020-02-27 19:48:18 +01:00
Alhaziel
c946650998 Fix Community Channels 2020-02-27 19:14:13 +01:00
Alhaziel
96fd3fec53 Fix Ordine Episodi SerieHD 2020-02-27 19:14:12 +01:00
marco
137e06f2d5 aggiornati URL canali 2020-02-26 21:20:56 +01:00
alfa-addon
685c2747bd fix youtube (trailer) 2020-02-26 21:08:37 +01:00
marco
e0820ab0f7 fix wstream per cambio di struttura 2020-02-25 23:10:09 +01:00
marco
5adcd634d6 fix updater 2020-02-24 23:10:51 +01:00
marco
d3e4777723 workaround per osx 2020-02-24 23:06:10 +01:00
marco
f3e7c9a20e disabilitato temporaneamente akvideo 2020-02-24 22:27:28 +01:00
marco
173f508120 fix lentezza cb01 2020-02-24 18:33:29 +01:00
marco
9f2ee86c19 Merge branch 'stable' of github.com:kodiondemand/addon into stable 2020-02-23 11:02:59 +01:00
marco
51cc670648 priorità alle librerie interne 2020-02-23 11:02:35 +01:00
Alhaziel
42df4b2276 Fix CB01 Tag Qualità 2020-02-22 15:11:02 +01:00
marco
a1f6c6073c fix updater 2020-02-22 14:19:49 +01:00
marco
ca6d5eb56d KoD 0.8
- tanti miglioramenti sotto il cofano, supporto iniziale al futuro kodi 19
- Nuova modalità di visualizzazione per episodio successivo
- fixato wstream tramite l'aggiunta della finestra per risolvere il reCaptcha
- aggiunta sezione segnala un problema in Aiuto
- altri fix e migliorie varie a canali e server
2020-02-22 13:36:58 +01:00
Alhaziel
82b61df289 Fix SerieHD 2020-02-17 09:41:12 +01:00
marco
19677122bf fix hdpass 2020-02-15 21:18:02 +01:00
Alhaziel
218f5ef437 Fix VVVVID 2020-02-15 21:17:11 +01:00
mac12m99
08a2ef1d7c fix hdpass (altadefinizioneclick, la casa del cinema e seriehd)
disabilitato vedohd (chiuso, rimanda a cb01)
2020-02-15 21:16:42 +01:00
marco
c141de9729 fix wstream 2020-02-12 19:23:45 +01:00
Alhaziel
c8cb746b41 Fix Raiplay (in caso di serie Vuote) 2020-02-05 20:21:36 +01:00
marco
23e9ac3875 KoD 0.7.2
- aggiunto raiplay
- aggiunto d.s.d.a (ex documentaristreamingda)
- svariati fix ai canali (eurostreaming, streamtime, piratestreaming, altadefinizioneclick)
- la videoteca ora può essere messa nelle unità di rete
- aggiunto server upstream
- altri piccoli fix vari
2020-02-04 18:24:33 +01:00
marco
be85578100 fix autoplay -> nascondi server 2020-01-31 18:00:51 +01:00
marco
d149168a59 fix akvideo su netfreex 2020-01-31 17:58:49 +01:00
Alhaziel
3001be766d Fix Lista Serie TV CB01 2020-01-31 17:58:24 +01:00
Alhaziel
cb0cc0ce65 Fix Aggiornamento Informazioni Libreria 2020-01-31 17:58:07 +01:00
Alhaziel
6eb938756a Fix Download Episodio 2020-01-31 17:57:51 +01:00
Alhaziel
5e2141103d Fix Aggiornamento Informazioni Libreria 2020-01-31 17:57:19 +01:00
Alhaziel
510d268cfd Fix SerieHd Episodi multipli 2020-01-31 17:57:00 +01:00
Alhaziel
7deeef5036 fix download (aggiunta di nuove librerie da Alfa) 2020-01-31 17:56:30 +01:00
Alhaziel
2831cfb784 Fix Community Channels 2020-01-31 17:56:14 +01:00
mac12m99
2a6f9b324d fix mixdrop 2020-01-25 12:01:50 +01:00
marco
2d269c4481 fix altadefinizioneclick 2020-01-25 12:01:35 +01:00
mac12m99
a083214406 fix cinemalibero 2020-01-24 20:53:50 +01:00
Alhaziel
ef03a17f04 Fix Dreamsub 2020-01-22 20:57:03 +01:00
Alhaziel
e3d20d6453 Fix Community Channel 2020-01-22 20:40:37 +01:00
mac12m99
a78d3be334 fix updater 2020-01-22 19:38:06 +01:00
marco
5723060aa7 Merge remote-tracking branch 'origin/stable' into stable 2020-01-21 21:06:42 +01:00
marco
ee3dc628f4 migliorie updater 2020-01-21 21:06:35 +01:00
Alhaziel
6f66c964e8 Fix dooplay_menu 2020-01-21 12:42:12 +01:00
Alhaziel
cc5a81457b Aggiornamento URL Canali 2020-01-21 12:16:36 +01:00
Alhaziel
bf4bf72322 Fix Nascondi Server da menu AutoPlay del canale 2020-01-21 11:56:07 +01:00
Alhaziel
b37a7f4cca Fix SerieHD 2020-01-21 11:53:11 +01:00
Alhaziel
b6a670705f Fix 2020-01-21 11:53:08 +01:00
Alhaziel
7bf337b38b Nascondi Server da menu AutoPlay del canale 2020-01-21 11:45:12 +01:00
marco
419d796320 Merge branch 'stable' of github.com:kodiondemand/addon into stable 2020-01-20 22:58:21 +01:00
marco
9be1be7f57 fix 2020-01-20 22:57:50 +01:00
Alhaziel
e82855ba9a Fix Pagina Successiva AnimeUnity 2020-01-20 19:40:55 +01:00
marco
2e42be55ea fix cb01 -> sezione film (causa oscar..) 2020-01-20 19:36:44 +01:00
marco
d585b0e042 KoD 0.7.1
- A grande richiesta, è ora possibile riprodurre in automatico l'episodio successivo di una serie in libreria
- aggiunta la possibilità di nascondere la lista dei server, quando si usa l'autoplay
- aggiunto canale pufimovies.com
- fix vari
2020-01-20 18:31:32 +01:00
mac12m99
1b63ed4046 fix autoplay in videoteca 2020-01-15 19:54:09 +01:00
greko
86a48249cc check dns solo se il resolver è disattivato 2020-01-15 19:53:48 +01:00
mac12m99
e7f3c52a0d supporto clouflare per wstream e akstream 2020-01-13 21:17:42 +01:00
mac12m99
cc76e69c7f fix filmpertutti videoteca 2020-01-12 15:50:54 +01:00
mac12m99
44704ae556 fix override DNS e altre migliorie 2020-01-10 22:32:52 +01:00
Alhaziel
ac2edfd117 Fix Launcher 2020-01-08 20:38:22 +01:00
marco
b4376525de KoD 0.7
- nuovo metodo di override DNS
- aggiunta opzione nascondi server, se usi l'autoplay
- migliorie al codice e fix vari
2020-01-08 19:19:59 +01:00
marco
d1cc659707 aggiornato dominio cb01 causa nuovo oscuramento 2020-01-05 23:16:41 +01:00
marco
5e14d4d156 fix tantiflm, url fembed (tantifilmHD), nuovo server hdplayer e tentativo iniziale fix mystream 2019-12-31 12:36:01 +01:00
Alhaziel
54d195dcac Fix CB01 (miniserie) 2019-12-27 19:33:49 +01:00
Alhaziel
bf3201a956 Fix Cinemalibero 2019-12-27 19:33:14 +01:00
Alhaziel
75b95c75b0 Fix Wstream 2019-12-27 18:43:45 +01:00
Alhaziel
df13645ce9 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2019-12-27 15:52:24 +01:00
Alhaziel
f861072df8 Fix VVVVID 2019-12-27 15:52:09 +01:00
marco
e2e1e7ed31 Merge branch 'stable' of github.com:kodiondemand/addon into stable 2019-12-25 22:45:15 +01:00
Whiplash
9d3d9ba356 [polpotv] url update 2019-12-25 22:44:25 +01:00
Alhaziel
7d96b94ce5 Community Channels Ricerca personalizzata 2019-12-23 19:56:46 +01:00
Alhaziel01
ad0efcb1fa Fix popolari genere ecc... 2019-12-22 19:23:46 +01:00
Alhaziel
e82e91e26b Fix Ricerca Oggi in TV 2019-12-22 14:16:22 +01:00
Alhaziel
68c60358d1 Fix Ricerca Mondoserie TV 2019-12-22 14:16:10 +01:00
Alhaziel
efab57cafe VVVVID ricerca multithread 2019-12-22 14:15:54 +01:00
Alhaziel
823a9f4f18 Ulteriori Impostazioni per ricerca Globale 2019-12-22 14:15:40 +01:00
marco
d777071851 fix ricerche salvate 2019-12-22 14:15:28 +01:00
Alhaziel
8a490da29e Ricerca Globale ricerche salvate e impostazioni 2019-12-22 14:15:12 +01:00
Alhaziel
c01ff42d98 Fix e pulizia Cinemalibero 2019-12-22 14:14:54 +01:00
marco
f4e9f29f40 KoD 0.6
-Nuova ricerca globale
-migliorie prestazionali in generale
-fix vari ai server
2019-12-20 22:32:38 +01:00
Alhaziel
c2c0ccf525 Fix Per Community Channels e link Diretti 2019-12-12 20:33:21 +01:00
marco
f6ab98e172 rimozione codice ora inutile 2019-12-12 20:33:13 +01:00
Alhaziel
2b7ab70252 fix SerieHD, Mixdrop e canali community 2019-12-11 18:38:40 +01:00
greko
075ed18534 fix cinemalibero 2019-12-11 18:34:40 +01:00
greko17
2a479ca0c3 migliorate impostazioni resolverDNS 2019-12-11 18:31:48 +01:00
mac12m99
2308088e56 rimossi log di troppo 2019-12-10 18:08:56 +01:00
Alhaziel
58e7a3e84d KoD 0.5.2
- Fix Parziale Cloudflare
- Maggiore Reattività
- Su ANDROID alcuni siti es. SerieHD e GuardaSerie.click non funzionano
2019-12-07 19:50:43 +01:00
Alhaziel
98580ba11a Fix PolpoTV 2019-12-02 21:07:17 +01:00
Alhaziel
3350173000 Fix Piratestreaming 2019-12-02 21:07:07 +01:00
Alhaziel
576c8c7a7e Support in caso di episodi multipli 2019-12-02 21:06:57 +01:00
Alhaziel
05060d369b Riscritto Piratestreaming 2019-12-02 21:06:47 +01:00
Alhaziel
4ca049652e Trakt Miglioramento Grafico 2019-12-02 21:06:38 +01:00
Alhaziel
abac049cdb Fix Onlystream 2019-12-02 21:06:25 +01:00
Alhaziel
777616de78 Fix Cloudscraper
ritornati alla vecchia versione di js2py
modificata versione 1.1.24 di cloudscrape
2019-12-02 21:06:16 +01:00
Alhaziel
33e1cf1d72 Fix Trakt 2019-12-02 21:05:56 +01:00
greko17
0239663fa5 fix: checkdns sostituito cb01.uno con 2019-12-02 21:05:47 +01:00
greko17
b8744c5861 fix: deltabit 2019-12-02 21:05:47 +01:00
marco
daad0d3ddb KoD 0.5.1
-override DNS di default
-nuovi canali: streamtime e netfreex
-fixato cloudflare
-aggiunta opzione apri nel browser
2019-11-30 20:27:09 +01:00
Alhaziel
4332859f47 Aggiornato js2py
Previene loop in caso cloudscrape fallisca
2019-11-27 22:39:55 +01:00
marco
bef725452a fix mondoserietv (ricerca serie) 2019-11-27 21:37:14 +01:00
mac12m99
8ec04a00c8 updater: riavvio per cambio traduzioni non più necessario 2019-11-27 21:24:44 +01:00
Alhaziel
b3ddcbec65 migliorie ai canali community 2019-11-26 21:30:43 +01:00
Alhaziel01
8ff73ef36c Altro Fix Wstream 2019-11-23 15:39:10 +01:00
Alhaziel01
b4d9e8b082 Fix Wstream 2019-11-23 13:59:46 +01:00
Alhaziel
5a329c25d0 Fix Community 2019-11-23 13:59:33 +01:00
mac12m99
ef0133db4f fix film cb01 2019-11-22 22:52:11 +01:00
Alhaziel
1fb7041390 Fix Fastsubita
(cherry picked from commit 72e8b2d0f9)
2019-11-22 21:21:58 +01:00
Alhaziel
c696c1a787 Fix Turbovid 2019-11-21 20:00:06 +01:00
Alhaziel
f2ee0bf7af Fix dailimotion 2019-11-21 19:59:55 +01:00
Alhaziel
bfdae83a7a Fix Wstream 2019-11-21 19:59:31 +01:00
Alhaziel
e15a4c0dbf Fix CommunityChannels 2019-11-21 19:56:30 +01:00
Alhaziel01
ce14919980 Fix Scarica Stagione 2019-11-20 22:46:22 +01:00
Alhaziel01
ba0dcb6a94 Fix Icone (autothumb) 2019-11-20 21:53:11 +01:00
Alhaziel
703d2d0704 Icone Genere in ricerca e migliorato autothumb 2019-11-20 21:20:39 +01:00
Alhaziel
3c4625e513 Rimossi colori in ricerca alternativa 2019-11-20 21:19:58 +01:00
Alhaziel01
e91bce397f Fix Download e Speedvideo 2019-11-20 21:19:47 +01:00
Alhaziel
de759e2310 risoluzioni in ordine per download a qualità massima 2019-11-20 21:16:15 +01:00
marco
87122f99c4 fix alcuni link wstream 2019-11-20 20:35:00 +01:00
mac12m99
1d70735986 nuova regex wstream 2019-11-20 20:19:20 +01:00
mac12m99
87835c2051 lingua all se non riconosciuta 2019-11-20 20:19:07 +01:00
Alhaziel
b0a69f9d86 Aggiornato Httptools e cloudscrape
Fix per Py3 e commenti in inglese
2019-11-20 20:18:50 +01:00
Alhaziel
7b0a3152de Fix Channelselector 2019-11-20 20:18:36 +01:00
marco
490997083b Merge branch 'stable' of github.com:kodiondemand/addon into stable 2019-11-19 21:36:05 +01:00
marco
51e4976202 supporto ai link stayonline.pro 2019-11-19 21:35:09 +01:00
Alhaziel
b14d22887a Fix Wstream 2019-11-18 21:15:49 +01:00
Alhaziel
b1775e3e46 Fix CB01, sezione server 2019-11-17 13:10:12 +01:00
mac12m99
4d4d5cf1d6 updater: allineamento con fix xbox 2019-11-17 13:07:11 +01:00
Alhaziel
2b451ff3e8 Fix Animesaturn 2019-11-14 20:05:56 +01:00
Alhaziel
9b316dd0fe Aggiornati link ai canali
Aggiunto AnimeSaturn ai link diretti
2019-11-14 18:41:13 +01:00
marco
4e80db95f3 Fix vari canali AnimeWorld, Filmpertutti, serieTVU e dreamsub.
Fix server clipwatching e aggiunta risoluzione in Wstream
2019-11-14 09:16:10 +01:00
greko17
d8a61c0b20 fix: serietvu.py 2019-11-11 22:28:47 +01:00
Alhaziel
bbc69034dd Fix Speedvideo 2019-11-11 22:28:37 +01:00
Alhaziel
6863b1e902 Server Tantifilm (Fembed) 2019-11-11 22:28:03 +01:00
Alhaziel
f186d6bd82 Fix CB01 Serie / Ultime Aggiornate 2019-11-11 22:27:50 +01:00
Alhaziel
2765295811 Fix Download e salvataggio Impostazioni 2019-11-11 22:27:06 +01:00
greko17
76042344bb fix: backin.py 2019-11-11 22:22:39 +01:00
mac12m99
413556b939 cb01: aggiungiamo link anche se hanno sbagliato a scrivere streaming 2019-11-11 22:22:36 +01:00
Alhaziel
b90f78fd66 Alro Fix a Rinumerazione 2019-11-09 16:16:13 +01:00
Alhaziel
bd159d8644 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2019-11-09 15:25:38 +01:00
Alhaziel
8db0b7e321 Fix Rinumerazione 2019-11-09 15:25:03 +01:00
Alhaziel
08291d6bc3 Fix Go Unlimited 2019-11-09 13:50:30 +01:00
Alhaziel
6a0c0ba205 Server VUP Player 2019-11-09 13:50:23 +01:00
Alhaziel
4ac0002bca Fix Cloudvideo 2019-11-09 13:50:17 +01:00
Alhaziel
b2d1b804cd Fix Mixdrop 2019-11-09 13:50:00 +01:00
Alhaziel
8bcd6cd52a Fix CB01 Novità + Ultimi 100 Film Aggiornati 2019-11-09 13:49:48 +01:00
Alhaziel01
04ee8cbc30 Fix SerieSubIta 2019-11-09 13:45:00 +01:00
greko17
92046ff620 fix: cinemalibero.py
aggiunta in videoteca delle serie
2019-11-09 08:53:26 +01:00
Whiplash
6a1e80a8c9 [polpotv] movie list url update 2019-11-09 08:53:18 +01:00
greko17
224f1de723 add: serietvsubita.py
cerca
2019-11-09 08:53:08 +01:00
greko17
aa5a771f36 fix: serietvsubita.py 2019-11-09 08:52:46 +01:00
Alhaziel
56f3ade3f0 Fix Supervideo 2019-11-08 17:29:59 +01:00
Alhaziel
0d3508fc82 Fix AnimeWorld 2019-11-08 17:27:29 +01:00
Alhaziel
3ff4a5be15 Fix Community Relative Path 2019-11-08 17:27:18 +01:00
Alhaziel
064f282c00 Fix Ricerca (Generi, Orain onda, ecc...) 2019-11-08 17:26:38 +01:00
mac12m99
5f5551afd8 ennesimo fix wstream per cambio di struttura 2019-11-07 20:56:26 +01:00
Alhaziel
69479b0423 Fix VVVVID 2019-11-07 20:47:42 +01:00
marco
aea652c703 KoD 0.5
-Ridefinito il modo in cui vengono scritti i canali, per assicurare migliore stabilità, debuggabilità e coerenza
-Riscritti di conseguenza molti canali, corregendo di fatto moltissimi problemi che avete segnalato
-Quando aggiungi in videoteca da fonti in più lingue (ita/sub ita) o più qualità, ti viene chiesto quale tipo di fonte vuoi.
-Per gli amanti degli anime, aggiunto VVVVID (senza bisogno di account!)
-Aggiunti i server supervideo e hdload, fixato wstream
-migliorie varie
2019-11-07 19:10:53 +01:00
mac12m99
29660ea54d miglioramenti updater per major release 2019-11-03 15:43:32 +01:00
marco
ed7b5e94e0 Revert "Merge branch 'stable' into stable"
This reverts commit a641beef22, reversing
changes made to 04c9d46a99.
2019-11-01 21:45:34 +01:00
mac12m99
a641beef22 Merge branch 'stable' into stable 2019-11-01 21:30:27 +01:00
mac12m99
04c9d46a99 disabilitati server chiusi 2019-11-01 15:56:05 +01:00
mac12m99
f57d32d632 aggiunto server mixdrop 2019-11-01 15:55:44 +01:00
greko
14eef54645 fix checkdns 2019-10-26 15:04:01 +02:00
Alhaziel
27df24c2dc Ennesimo Fix per Wstream... 2019-10-24 20:37:22 +02:00
Alhaziel
c14fe12eef Fix Wstream (di nuovo..) 2019-10-22 20:11:32 +02:00
mac12m99
c082e505fc fix wstream per cambio url 2019-10-21 18:09:10 +02:00
marco
9881c927f7 migliorie updater 2019-10-18 22:28:47 +02:00
marco
76c763dff0 fix cineblog 2019-10-18 21:09:07 +02:00
Alhaziel
7bc350d8ab Fix CB01 2019-10-14 17:03:11 +02:00
Alhaziel
a1e3eb2181 Fix CB01 - Ultimi 100 Film Aggiornati 2019-10-09 20:59:16 +02:00
Alhaziel
862149fc2d Fix Wstream 2019-10-09 19:10:46 +02:00
Alhaziel
c457c58857 Fix Novità CB01 2019-10-09 19:10:36 +02:00
Alhaziel
4f1e8eaee4 Fix CB01 2019-10-09 10:17:30 +02:00
Alhaziel
305ffe9fec Fix Server CB01 2019-10-08 17:08:50 +02:00
Alhaziel
0dc9ece8ab Fix CB01 2019-10-08 09:52:03 +02:00
mac12m99
94762b6b6a fixati i link protetti da vcrypt (presenti prevalentemente su cb01 ed eurostreaming) 2019-09-27 20:41:53 +02:00
mac12m99
63220c1e1f nuovo fix cb01 (ennesimo cambio struttura) 2019-09-23 20:58:36 +02:00
Alhaziel
52096a8380 Fix Openload 2019-09-21 12:46:50 +02:00
Alhaziel
72865c2b58 Fix Rapidcrypt 2019-09-21 11:46:03 +02:00
Alhaziel
d6310a0181 Fix Menu, Novità CB01 2019-09-20 10:24:48 +02:00
Alhaziel
03b1039449 Fix CB01 2019-09-20 09:51:14 +02:00
Alhaziel
5ae9234ef4 Fix Animeworld In Corso / Ultimi Episodi 2019-09-06 21:05:12 +02:00
Alhaziel
8aa5d2d784 SerieHD - Fix Pagina Successiva 2019-09-04 20:50:03 +02:00
Alhaziel
10d22c9929 Fix Toonitalia 2019-09-04 17:23:25 +02:00
mac12m99
bbee4ee18d KoD 0.4.2
semplice cambiamento di versione senza nessuna modifica, necessario per aggiornare la versione sul repository.
Questo sarà l'ultimo aggiornamento del repository, dopodiché verrà dismesso e l'unico metodo per installare KoD sarà tramite installer (vedi la procedura sul sito kodiondemand.github.io).
Se l'hai installato dal repository non devi preoccuparti, sappi solo che se lo dovessi installare su un nuovo dispositivo, la procedura è cambiata.
Nel caso avessi problemi usa l'installer (anche senza disinstallare) e dovrebbe andare.
2019-09-03 19:34:22 +02:00
Alhaziel
683dffb0db Fix Pagina Successiva in ricerca IMDB 2019-09-03 12:13:24 +02:00
greko17
94ee66fdf9 modificato messaggio DNS 2019-09-02 19:43:06 +02:00
mac12m99
a3c90d90d7 KoD 0.5
KoD 0.5
-riscritti molti canali per cambiamenti nella struttura stessa di kod
-altre robe carine
2019-08-30 20:47:43 +02:00
mac12m99
aceb155d98 test 2019-08-30 20:40:51 +02:00
mac12m99
53a28ba09c aggiornate traduzioni 2019-08-30 18:11:48 +02:00
mac12m99
aa3291eedb aggiunto check connessione e DNS 2019-08-30 18:03:45 +02:00
thepasto
d789aec06d aggiunto supporto a woof.tube (#78) 2019-08-25 23:05:02 +02:00
Alhaziel
d6c68f860c Fix Lista server in Libreria 2019-08-17 10:38:51 +02:00
mac12m99
de7bfcfb26 fix errore di sintassi 2019-08-14 12:18:10 +02:00
mac12m99
25b3a66a21 traduzioni mancanti sezione "I miei link" 2019-08-11 13:55:24 +02:00
mac12m99
e6b7ad33a3 fix sezione "I miei link"
(cherry picked from commit 0d085e66c9)
2019-08-11 13:52:40 +02:00
mac12m99
6f9e523070 evito di mostrare errore quando l'updater elimina un file non esistente
(cherry picked from commit 4b7da3530f)
2019-08-09 16:01:42 +02:00
mac12m99
6c3e9b4922 fix e migliorie all'updater 2019-08-08 12:09:45 +02:00
mac12m99
4726019e4d rimosso canale clone e canale chiuso 2019-07-23 20:42:11 +02:00
greko
d55f8dee92 Aggiornati canali con redirect 2019-07-23 18:23:17 +02:00
mac12m99
9f5efce980 aggiornato dominio animeworld 2019-07-20 11:13:37 +02:00
Alhaziel
d79e0ebdf0 Rimosso canale non più funzionante 2019-07-15 18:34:58 +02:00
Alhaziel
c25da23cb2 Revert "Revert "Merge branch 'stable' of https://github.com/kodiondemand/addon into stable""
This reverts commit cd19484c70.
2019-07-13 18:52:28 +02:00
Alhaziel
cd19484c70 Revert "Merge branch 'stable' of https://github.com/kodiondemand/addon into stable"
This reverts commit cf50d10a00, reversing
changes made to 4a695c096c.
2019-07-13 18:51:48 +02:00
Alhaziel
cf50d10a00 Merge branch 'stable' of https://github.com/kodiondemand/addon into stable 2019-07-13 18:40:08 +02:00
Alhaziel
4a695c096c Fix Ricerca per Altadefinizione01 2019-07-13 18:39:43 +02:00
mac12m99
0564d46bcf miglioramenti vari updater + fix definitivo android 2019-07-13 18:24:40 +02:00
Alhaziel
bb903c6d87 Fix per Wstream 2019-07-13 16:59:40 +02:00
Alhaziel
4e96fed455 Cambio dominio per FilmPerTutti 2019-07-11 19:50:15 +02:00
greko
df893e17ff fix per bit.ly e fix di alcuni canali 2019-07-11 17:43:59 +02:00
Alhaziel
478f6e1a10 pagination fix 2019-07-09 21:03:29 +02:00
Alhaziel
8df6e3df60 Fix ricerca CB01 2019-07-09 21:03:17 +02:00
Alhaziel
a8313b0bc2 Fix Numerazione Automatica 2019-07-09 21:02:55 +02:00
mac12m99
e53eb1de90 KoD 0.4.1 2019-07-07 13:33:29 +02:00
marco
9d475beb24 try except unzipper -> workaround 2019-07-07 13:32:26 +02:00
marco
353a675629 possible fix crash if no internet connection 2019-07-07 13:32:17 +02:00
marco
697c6fd8e4 possible fix badZipFile on android 2019-07-07 13:32:05 +02:00
Alhaziel
832f94ed99 Fix Numerazione Automatica 2019-07-07 13:31:53 +02:00
mac12m99
7facd7cee2 Rimossa dipendenza inutile 2019-07-01 08:38:38 +02:00
marco
9c2ab8d8fc stable 2019-06-30 10:42:09 +02:00
mac12m99
3fb9b068d9 KoD 0.4 (#57)
* fix next page

* testing new filmontv

* Wstream quick fix, no resolution displayed :(

* new filmontv

* now regex is ok

* fix .po files

* +netlovers

* working on filmontv

* fix debriders

* new updater

* updater

* fix crash

* fix updater and re-add dev mode

* new url eurostreaming

* Delete netlovers.py

* Delete netlovers.json

* -net from menù

* fix eurostreaming: numero stagione e newest (#50)

* fix canale

* fix newest

* fix numero puntata

* cleanup

* cleanup 2

* fix updater crash on windows

* Fix Animeworld

* Nuovo Autorenumber

* initial background downloader support

* ops

* Update channels.json

* Update channels.json

* fix openload

* move json update to cohesist with updater

* disable json url updates

* fix typo

* fix typo 2

* Add files via upload

* Add files via upload

* fix autoplay in community channels

* fix toonitalia

* Fix Toonitalia

* workaround serietvsubita

* Nuova Rinumerazione Automatica

* Fix per Rinumerazione Automatica

* workaround updater

* Fix on air

* ops

* Personalizzazione sezione "Oggi in TV"

* Aggiunto orario sezione Oggi in TV

* aggiunto bit.ly (#56)

* aggiunto bit.ly

* Aggiunta personalizzazione homepage

* Revert "initial background downloader support"

This reverts commit f676ab0f

* KoD 0.4
2019-06-30 10:35:48 +02:00
1815 changed files with 206763 additions and 93021 deletions

View File

@@ -1,6 +0,0 @@
[Dolphin]
Timestamp=2019,4,23,18,58,8
Version=4
[Settings]
HiddenFilesShown=true

View File

@@ -0,0 +1,21 @@
---
name: Segnala Problemi ad un Canale
about: Invio segnalazione per un canale non funzionante
title: 'Inserisci il nome del canale'
labels: Problema Canale
assignees: ''
---
**Per poter scrivere o allegare file nella pagina devi:**
- cliccare sui [ ... ] in alto a destra della scheda
- Edit. Da questo momento puoi scrivere e/o inviare file.
Inserisci il nome del canale
- Indica il tipo di problema riscontrato, sii il più esauriente possibile. Che azione ha portato all'errore (Es. non riesco ad aggiungere film nella videoteca, ne dal menu contestuale, ne dalla voce in fondo alla lista dei server)
- Ottieni il log seguendo le istruzioni: https://telegra.ph/LOG-11-20 e invialo qui.

View File

@@ -0,0 +1,19 @@
---
name: Segnala Problemi ad un Server
about: Invio segnalazione per un server non funzionante
title: 'Inserisci il nome del server'
labels: Problema Server
assignees: ''
---
**Per poter scrivere o allegare file nella pagina devi:**
- cliccare sui [ ... ] in alto a destra della scheda
- Edit. Da questo momento puoi scrivere e/o inviare file.
Inserisci il nome del server che indica problemi e se il problema è circoscritto ad un solo canale, indicalo
- Allega il file di log nella sua completezza. Non cancellarne delle parti.

286
.github/ISSUE_TEMPLATE/test-canale.md vendored Normal file
View File

@@ -0,0 +1,286 @@
---
name: Test Canale
about: Pagina per il test di un canale
title: ''
labels: Test Canale
assignees: ''
---
Documento Template per il Test del canale
Specifica, dove possibile, il tipo di problema che incontri, anche se non è presente alcuna voce per indicarlo.
Se hai **suggerimenti/consigli/dubbi sul test**...Proponili e/o Chiedi! Scrivendo un commento a questo stesso issue, che trovi in fondo, dopo questa pagina.
**Avvertenze:**
Per il test dei canali **DEVI**:
- utilizzare la versione **[BETA](https://kodiondemand.github.io/repo/KoD-installer-BETA.zip)** di KOD!
- **ABILITARE IL DEBUG PER I LOG**
**Per eseguire il test, ricordati di titolarlo con il nome del canale da te scelto, e salvare la pagina cliccando sul bottone verde in basso "SUBMIT NEW ISSUE"**
**Ogni volta che hai un ERRORE con avviso di LOG. Puoi scegliere se:
ALLEGARE IMMEDIATAMENTE il file kodi.log nel punto, della pagina, in cui sei nel test
Allegare il file kodi.log a fine pagina.**
**Per poter scrivere o allegare file nella pagina devi:**
- cliccare sui [ ... ] in alto a destra della scheda
- Edit. Da questo momento puoi scrivere e/o inviare file.
Dopodiché clicca sul bottone verde "Update comment" per continuare il test nel modo consueto o per terminarlo!
Se hai problemi non previsti dal test, segnalali aggiungendoli in fondo al test.
**SE VEDI I QUADRATINI MA NON RIESCI A CLICCARLI... DEVI CLICCARE SUL BOTTONE VERDE "SUBMIT NEW ISSUE"**
***
I file relativi al canale li trovi:
- su browser:
[Apre la pagina dei Canali](https://github.com/kodiondemand/addon/tree/master/channels)
- sul device:
[nella specifica cartella](https://github.com/kodiondemand/addon/wiki/Percorsi-sui-diversi-S.O.) , .kodi/addons/channels.
Per aprirli non servono programmi particolari un semplice editor di testo è sufficiente.
**Test N.1**: Controllo del file .json
Occorrente: file .json
**1. Indica la coerenza delle voci presenti in "language" con i contenuti presenti sul sito:**
valori: ita, sub-ita (sub-ita)
- [ ] coerenti
- [ ] non coerenti
Se non sono coerenti il test è FALLITO, continua comunque a revisionare il resto
**2. Icone del canale**
Controlla sia presente qualcosa, tra le " " di thumbnail e banner, e che le immagini appaiano su KoD
**in thumbnail:**
- [ ] Presente
- [ ] Assente
**in banner:**
- [ ] Presente
- [ ] Assente
**3. Verifica la coerenza delle voci presenti in "categories" con i contenuti presenti sul sito:**
Riepilogo voci:
movie, tvshow, anime, documentary, vos, adult
(se il sito contiene film e serie, devono esserci sia movie che tvshow, se contiene solo film, solo movie)
- [ ] Corrette
- [ ] 1 o più Errata/e
- [ ] Assenti - Non sono presenti voci in categories, in questo caso non puoi continuare il test.
Se le voci sono: Assenti, dopo aver compilato la risposta, salva il test e **NON** proseguire.
**TEST FALLITO**
***
**Test su KOD.**
Entra in KOD -> Canali. Nella lista accedi al canale che stai testando.
**N.B.**: Il nome del canale è il campo **name** nel file .json.
**Test N.2: Pagina Canale**
**1. Cerca o Cerca Film...**
Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati.
- [ ] OK
- indica il tipo di problema
**Sezione FILM (se il sito non ha film elimina questa parte)**
**TestN.3: Pagina dei Titoli**
*Test da effettuare mentre sei dentro un menu del canale (film, serietv, in corso ecc..)*.
Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai le seguenti voci, nel menu contestuale (tasto c o tenendo enter premuto):
**1. Aggiungi Film in videoteca**
- [ ] Si
- [ ] No
**2. Scarica Film (devi avere il download abilitato)**
- [ ] Si
- [ ] No
**Fine test menu contestuale**
**Fondo pagina dei titoli**
**3. Paginazione, controlla ci sia la voce "Successivo" (se non c'è controlla sul sito se è presente)**
- [ ]
- [ ] NO
**Dentro un titolo
**4. Entra nella pagina del titolo e verifica ci sia almeno 1 server:**
- [ ] Si
- [ ] No
**5. Eventuali problemi riscontrati**
- scrivi qui il problema/i
**Sezione Serie TV (se il sito non ha serietv elimina questa parte)**
Test da effettuare mentre sei nella pagina dei titoli.
Per ogni titolo verifica ci siano le voci nel menu contestuale.
**1. Aggiungi Serie in videoteca**
- [ ] Si
- [ ] No
**2. Scarica Stagione (devi avere il download abilitato)**
- [ ] Si
- [ ] No
**3. Scarica Serie (devi avere il download abilitato)**
- [ ] Si
- [ ] No
**4. Cerca o Cerca Serie...**
Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati.
- [ ] Ok
- indica il tipo di problema
**5. Entra nella pagina della serie, verifica che come ultima voce ci sia "Aggiungi in videoteca":**
- [ ] Si, appare
- [ ] Non appare
**6. Entra nella pagina dell'episodio, **NON** deve apparire la voce "Aggiungi in videoteca":**
- [ ] Si, appare
- [ ] Non appare
**7. Eventuali problemi riscontrati**
- scrivi qui il problema/i
**Sezione Anime (se il sito non ha anime elimina questa parte)**
Test da effettuare mentre sei nella pagina dei titoli. Per ogni titolo verifica ci siano le voci nel menu contestuale.
**1. Rinumerazione (se gli episodi non appaiono nella forma 1x01)**
- [ ] Si
- [ ] No
**2. Aggiungi Serie in videoteca**
- [ ] Si
- [ ] No
**3. Aggiungi 2-3 titoli in videoteca.**
- [ ] Aggiunti correttamente
- [Indica eventuali problemi] (copia-incolla per tutti i titoli con cui hai avuto il problema)
- COPIA qui l'ERRORE dal LOG
**4. Scarica Serie**
- [ ] Si
- [ ] No
**5. Cerca o Cerca Serie...**
Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati.
- [ ] Ok
- indica il tipo di problema
**6. Entra nella pagina della serie, verifica che come ultima voce ci sia "Aggiungi in videoteca":**
- [ ] Appare
- [ ] Non appare
**7. Entra nella pagina dell'episodio, NON ci deve essere la voce "Aggiungi in videoteca":**
- [ ] Non appare
- [ ] Appare
**8. Eventuali problemi riscontrati**
- scrivi qui il problema/i
** TEST PER IL CONFRONTO TRA SITO E CANALE **
**TestN.4: Pagina Sito - Menu Canale**
Occorrente: Browser, KOD! e il file canale.py ( da browser o da file )
Avviso:
- Sul Browser disattiva eventuali componenti aggiuntivi che bloccano i JS (javascript), li riattivi alla fine del test.
Entra in ogni menu e controlla che i risultati, delle prime 5-6 pagine, siano gli stessi che trovi sul sito, comprese le varie info (ita/sub-ita, qualità ecc..), inoltre entra, se ci sono, nei menu dei generi - anni - lettera, verifica che cliccando su una voce si visualizzino i titoli.
*Copia questa sezione per ogni voce che presenta problemi:*
- [ ] Voce menu ( del canale dove riscontri errori)
Titoli non corrispondenti:
- [ ] Il totale dei Titoli è diverso da quello del sito. Alcuni Titoli non compaiono.
- [ ] Appaiono titoli per pagine informative o link a siti esterni. Es: Avviso agli utenti.
- [ ] La lingua, del titolo, è diversa da quella riportata dal sito
- [ ] Non è indicato in 1 o più titoli che sono SUB-ITA
- [ ] Cliccando su "Successivo" non si visualizzano titoli
- [ ] Non è indicata la qualità: Hd-DVD/rip e altri, nonostante sul sito siano presenti
- [ ] NO
*Fine Copia*
**Test.N5: Ricerca Globale**
Per questo test ti consiglio di inserire come UNICO sito quello che stai testando, come canale incluso in: Ricerca Globale -> scegli i canali da includere
Il test è già compilato con le spunte, dato che devi copiarlo solo in caso di errori. Togli la spunta dove funziona.
Si consiglia di cercare almeno a fino 5 titoli. O perlomeno non fermarti al 1°.
Cerca 5 FILM a tuo piacimento, se il titolo non esce controlla confrontando i risultati sul sito...:
*Copia questa sezione per ogni voce che presenta problemi*
controlla ci siano queste voci se titolo è un FILM:
- [ ] inserisci il titolo cercato che da problemi
- [x] Aggiungi in videoteca
- [x] Scarica Film
*Fine Copia*
controlla ci siano queste voci se titolo è una SERIE/ANIME:
*Copia questa sezione per ogni voce che presenta problemi*
controlla ci siano queste voci se titolo è un FILM:
- [ ] inserisci il titolo cercato che da problemi
- [x] Aggiungi in videoteca
- [x] Scarica Serie
- [x] Scarica Stagione
- [ ] inserisci il titolo cercato che da problemi
*Fine Copia*
Se il canale ha la parte Novità (questa stringa avvisa che NON è presente: "not_active": ["include_in_newest"]).
**Test.N6: Novità.**
Per questo test ti consiglio di inserire come UNICO sito quello che stai testando, come canale incluso in: Novità -> categoria (film, serie o altro )
- [ ] Descrivere il problema
Fine TEST!
Grazie mille da parte di tutto il team KoD!

30
.github/workflows/tests.yml vendored Normal file
View File

@@ -0,0 +1,30 @@
name: Test Suite
on:
workflow_dispatch:
jobs:
tests:
runs-on: macos-latest
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4.3.0
with:
python-version: 3.9
- name: Run tests
run: |
./tests/run.sh
- name: Commit & Push changes
uses: dmnemec/copy_file_to_another_repo_action@main
env:
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
with:
source_file: 'reports'
destination_repo: 'kodiondemand/kodiondemand.github.io'
user_email: 'tests@kod.bot'
user_name: 'bot'
commit_message: 'Test suite'

30
.github/workflows/updateDomains.yml vendored Normal file
View File

@@ -0,0 +1,30 @@
name: Update channel domains
on:
workflow_dispatch:
schedule:
- cron: '30 17 * * *'
jobs:
update:
runs-on: ubuntu-latest
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: 3.7
- name: Install dependencies
run: pip install requests
- name: Update domains
run: python tools/updateDomains.py
- name: Commit & Push changes
uses: actions-js/push@master
with:
message: "Aggiornamento domini"
branch: "master"
github_token: ${{ secrets.API_TOKEN_GITHUB }}

View File

@@ -0,0 +1,32 @@
name: Update channel domains
on:
workflow_dispatch:
schedule:
- cron: '30 17 * * *'
jobs:
update:
runs-on: ubuntu-latest
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
with:
ref: stable
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v1
with:
python-version: 3.7
- name: Install dependencies
run: pip install requests
- name: Update domains
run: python tools/updateDomains.py
- name: Commit & Push changes
uses: actions-js/push@master
with:
message: "Aggiornamento domini"
branch: "stable"
github_token: ${{ secrets.API_TOKEN_GITHUB }}

7
.gitignore vendored
View File

@@ -3,4 +3,11 @@
.DS_Store
.idea/
.directory
custom_code.json
last_commit.txt
__pycache__/
.vscode/settings.json
bin/
lib/abi
tests/home/
reports/

55
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,55 @@
Ciao, grazie per aver preso in considerazione di contribuire a questo progetto!<br>
Ci sono molti modi per farlo, e per alcuni di essi non è necessario essere uno sviluppatore.
Puoi ad esempio [segnalare i cambiamenti di struttura](#segnalare-i-cambiamenti-di-struttura) dei canali/server, [scrivere guide o registrare video-esempi](#scrivere-guide-o-registrare-video-esempi) su alcune funzionalità "avanzate", dare consigli su funzionalità nuove o per migliorare quelle già presenti.
# Segnalare i cambiamenti di struttura
KoD, alla fine, non è altro che un browser che estrapola dai siti le info richieste secondo regole ben precise, basate sulla struttura dei siti.<br>
I siti web cambiano, spesso, ciò che oggi funziona domani potrebbe non più funzionare, pertanto sono fondamentali le segnalazioni, ma esse per essere realmente utili devono:
- contenere il file di log (lo potete generare andando in Aiuto - Segnala un problema e seguendo le istruzioni)
- spiegare brevemente qual è il problema e dove, ad esempio "cineblog dà errore quando entro nella sezione Film", oppure "wstream non dà nessun errore ma il video di fatto non parte"
- essere replicabili, se si tratta di cose che accadono una volta ogni tanto puoi provare a segnalare lo stesso, sperando che nel log ci sia qualche indizio. Se non c'è, nada
Prima di segnalare un problema assicurati che sia realmente legato a KoD; di seguito alcuni requisiti necessari:
- avere l'ultima versione di KoD, per controllare vai qui e confronta il numero con quello presente nella sezione aiuto: https://github.com/kodiondemand/addon/commits/stable
- avere una versione di kodi supportata, attualmente si tratta di 17.x e 18.x
- verificare che il problema non dipenda dal sito stesso: se esce il messaggio 'Apri nel Browser': apri il tuo Browser e prova se li il film o serie tv funziona, senno apri il menù contestuale (tasto c) e clicca su "apri nel browser"
Sei pregato di attenerti il più possibile a quanto descritto qua perché un semplice "non funziona" fa solo perdere tempo.
Puoi fare tutte le segnalazioni nella sezione [issues](https://github.com/kodiondemand/addon/issues), cliccando su "new issue" appariranno dei template che ti guideranno nel processo.
Assicurati che qualcun'altro non abbia già effettuato la stessa segnalazione, nel caso avessi altro da aggiungere rispondi ad un issue già aperto piuttosto che farne uno nuovo.
# Scrivere guide o registrare video-esempi
Cerca di essere sintetico ma senza tralasciare le informazioni essenziali, una volta fatto mandalo pure sul [gruppo telegram](https://t.me/kodiondemand) taggando gli admin (@admin).<br>
Verrà preso in considerazione il prima possibile ed eventualmente inserito nella [wiki](https://github.com/kodiondemand/addon/wiki) o verrà creato un comando richiamabile nel gruppo.
# Consigli
Effettuali sempre nella sezione [issues](https://github.com/kodiondemand/addon/issues), mi raccomando: descrivi e fai esempi pratici.<br>
# Per sviluppatori
Di seguito tutte le info su come prendere confidenza col codice e come contribuire
## Da dove posso partire?
Un buon punto di partenza è [la wiki](https://github.com/kodiondemand/addon/wiki), qui è presente un minimo di documentazione sul funzionamento di KoD.<br>
Ti consigliamo vivamente, una volta compreso il funzionamento generale dell'addon (e prima di iniziare a sviluppare), di [forkare e clonare il repository](https://help.github.com/en/github/getting-started-with-github/fork-a-repo).<br>
Questo perchè, oltre al fatto di poter iniziare a mandare modifiche sul tuo account github, l'utilizzo di git abilita la [dev mode](https://github.com/kodiondemand/addon/wiki/dev-mode), che ti sarà di aiuto nelle tue attività.
## che cosa posso fare?
Puoi provare a fixare un bug che hai riscontrato, aggiungere un canale/server che ti interessa ecc..
Oppure puoi guardare nella sezione [Projects](https://github.com/kodiondemand/addon/projects) cosa è previsto e iniziare a svilupparlo!
## ho fatto le modifiche che volevo, e ora?
Pusha sul tuo fork le modifiche che hai fatto e manda una pull request. Se è la prima volta ecco qualche link che ti aiuterà:
- http://makeapullrequest.com/
- http://www.firsttimersonly.com/
- [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github).
Quando crei la pull request, ricordati di spiegare brevemente qual'è la modifica e perchè l'hai fatta.
Quando avremo tempo revisioneremo le modifiche, potremmo anche segnalarti alcuni problemi, nel caso prenditi pure il tutto il tempo che vuoi per sistemare (non è necessaria un'altra pull, tutti i commit verranno riportati nella prima).<br>
Quando sarà tutto a posto accetteremo la pull includendo le modifiche
## Regole per le collaborazioni:
- Se si riutilizza codice proveniente da altri addon è necessario citarne la fonte, per rispetto di chi ci ha lavorato, in caso contrario il pull request verrà respinto.
- Ogni modifica o novità inviata dev'essere testata, può capitare che vi sia sfuggito qualche bug (è normale), ma l'invio di materiale senza preventivi controlli non è gradito.
- I nuovi canali devono essere funzionanti e completi di tutte le feature, comprese videoteca ed autoplay, non verranno accettati finchè non lo saranno.

1348
LICENSE

File diff suppressed because it is too large Load Diff

View File

@@ -1,24 +1,13 @@
# Kodi On Demand
### Un fork italiano di [Alfa](https://github.com/alfa-addon)
Ognuno è libero (anzi, invitato!) a collaborare, per farlo è possibile utilizzare i pull request.
KOD, come Alfa, è sotto licenza GPL v3, pertanto siete liberi di utilizzare parte del codice, a patto di rispettare i termini di suddetta licenza, che si possono riassumere in:
Installazione: https://kodiondemand.github.io/#download
KoD, come Alfa, è sotto licenza GPL v3, pertanto siete liberi di utilizzare parte del codice, a patto di rispettare i termini di suddetta licenza, che si possono riassumere in:
- Il tuo addon deve essere rilasciando secondo la stessa licenza, ovvero essere open source (il fatto che lo zip sia visibile da chiunque non ha importanza, è necessario avere un repository git come questo)
- Aggiungere i crediti a tutto ciò che copiate/modificate, ad esempio aggiungendo un commento nel file in questione o, meglio, facendo un cherry-pick (in modo da preservarnee lo storico)
- Aggiungere i crediti a tutto ciò che copiate/modificate, ad esempio aggiungendo un commento nel file in questione o, meglio, facendo un cherry-pick (in modo da preservarne lo storico)
### Come contribuire?
- Fai un Fork del repository.
- Effettua tutte le modifiche e fai un push nel tuo repository remoto.
- Testa tutte le funzioni principali (videoteca, autoplay, scraper web) o eventuali aggiunte extra.
- Apri una pull request.
### Come contribuire o fare segnalazioni?
Ti piace il progetto e vuoi dare una mano? Leggi [qui](https://github.com/kodiondemand/addon/blob/master/CONTRIBUTING.md)
Regole per le collaborazioni:
- Se si riutilizza codice proveniente da altri addon è necessario citarne la fonte, per rispetto di chi ci ha lavorato, in caso contrario il pull request verrà respinto.
- Ogni modifica o novità inviata dev'essere testata, può capitare che vi sia sfuggito qualche bug (è normale), ma l'invio di materiale senza preventivi controlli non è gradito.
- I nuovi canali devono essere funzionanti e completi di tutte le feature, comprese videoteca ed autoplay, non verranno accettati finchè non lo saranno.
Se parte del codice di un tuo addon è stato incluso in questo progetto e ne desideri l'eliminazione, crea una issue portando le prove di essere veramente uno dei dev e lo elimineremo.
### Qualcosa non funziona?
Sentiti libero di segnalarlo al team [qui](https://github.com/kodiondemand/addon/issues)

View File

@@ -1 +0,0 @@
theme: jekyll-theme-midnight

View File

@@ -1,26 +1,34 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.kod" name="Kodi on Demand" version="0.3.1" provider-name="KOD Team">
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.7.7" provider-name="KoD Team">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
<import addon="metadata.themoviedb.org"/>
<import addon="metadata.tvdb.com"/>
<import addon="script.module.web-pdb" />
<!-- <import addon="script.module.libtorrent" optional="true"/> -->
<import addon="metadata.themoviedb.org" optional="true"/>
<import addon="metadata.tvshows.themoviedb.org" optional="true"/>
<!-- <import addon="metadata.tvdb.com"/> -->
</requires>
<extension point="xbmc.python.pluginsource" library="default.py">
<provides>video</provides>
</extension>
<extension point="kodi.context.item">
<menu id="kodi.core.main">
<item library="contextmenu.py">
<label>90001</label>
<visible>!String.StartsWith(ListItem.FileNameAndPath, plugin://plugin.video.kod/) + [ String.IsEqual(ListItem.dbtype, tvshow) | String.IsEqual(ListItem.dbtype, movie) | String.IsEqual(ListItem.dbtype, season) | String.IsEqual(ListItem.dbtype, episode) ]</visible>
</item>
</menu>
</extension>
<extension point="xbmc.addon.metadata">
<summary lang="en">Kodi on Demand is a Kodi add-on to search and watch contents on the web.</summary>
<summary lang="it">Kodi on Demand è un addon di Kodi per cercare e guardare contenuti sul web.</summary>
<assets>
<icon>logo.png</icon>
<fanart>fanart.jpg</fanart>
<screenshot>resources/media/themes/ss/1.png</screenshot>
<screenshot>resources/media/themes/ss/2.png</screenshot>
<screenshot>resources/media/themes/ss/3.png</screenshot>
<icon>resources/media/logo.png</icon>
<fanart>resources/media/fanart.jpg</fanart>
<screenshot>resources/media/screenshot-1.png</screenshot>
<screenshot>resources/media/screenshot-2.png</screenshot>
<screenshot>resources/media/screenshot-3.png</screenshot>
</assets>
<news>Benvenuto su KOD!</news>
<news>- fix di routine ai canali/server
</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>
@@ -30,6 +38,6 @@
<forum>https://t.me/kodiondemand</forum>
<source>https://github.com/kodiondemand/addon</source>
</extension>
<extension point="xbmc.service" library="videolibrary_service.py" start="login|startup">
<extension point="xbmc.service" library="service.py" start="login|startup">
</extension>
</addon>

View File

@@ -1,63 +1,44 @@
{
"altadefinizione01_club": "https://www.altadefinizione01.cc",
"altadefinizione01_link": "http://altadefinizione01.link",
"altadefinizione01": "https://altadefinizione01.to",
"altadefinizioneclick": "https://altadefinizione.cloud",
"altadefinizionehd": "https://altadefinizione.doctor",
"animeforge": "https://ww1.animeforce.org",
"animeleggendari": "https://animepertutti.com",
"animestream": "https://www.animeworld.it",
"animespace": "https://animespace.tv",
"animesubita": "http://www.animesubita.org",
"animetubeita": "http://www.animetubeita.com",
"animevision": "https://www.animevision.it",
"animeworld": "https://www.animeworld.it",
"asiansubita": "http://asiansubita.altervista.org",
"casacinema": "https://www.casacinema.site",
"casacinemainfo": "https://www.casacinema.info",
"cb01anime": "http://www.cineblog01.ink",
"cinemalibero": "https://cinemalibero.icu",
"cinemastreaming": "https://cinemastreaming.icu",
"documentaristreamingda": "https://documentari-streaming-da.com",
"dreamsub": "https://www.dreamsub.stream",
"eurostreaming": "https://eurostreaming.gratis",
"eurostreaming_video": "https://www.eurostreaming.best",
"fastsubita": "http://fastsubita.com",
"ffilms":"https://ffilms.org",
"filmigratis": "https://filmigratis.net",
"filmgratis": "https://www.filmaltadefinizione.net",
"filmontv": "https://www.comingsoon.it",
"filmpertutti": "https://www.filmpertutti.tube",
"filmsenzalimiti": "https://filmsenzalimiti.best",
"filmsenzalimiticc": "https://www.filmsenzalimiti.host",
"filmsenzalimiti_blue": "https://filmsenzalimiti.best",
"filmsenzalimiti_info": "https://www.filmsenzalimiti.host",
"filmstreaming01": "https://filmstreaming01.com",
"filmstreamingita": "http://filmstreamingita.live",
"guarda_serie": "https://guardaserie.site",
"guardafilm": "http://www.guardafilm.top",
"guardarefilm": "https://www.guardarefilm.video",
"guardaseriecc": "https://guardaserie.site",
"guardaserieclick": "https://www.guardaserie.media",
"guardaserie_stream": "https://guardaserie.co",
"guardaserieonline": "http://www.guardaserie.media",
"guardogratis": "http://guardogratis.io",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.pw",
"italiafilm": "https://www.italia-film.pw",
"italiafilmhd": "https://italiafilm.info",
"italiaserie": "https://italiaserie.org",
"itastreaming": "https://itastreaming.film",
"majintoon": "https://toonitalia.org",
"mondolunatico": "http://mondolunatico.org",
"mondolunatico2": "http://mondolunatico.org/stream/",
"mondoserietv": "https://mondoserietv.com",
"piratestreaming": "https://www.piratestreaming.watch",
"seriehd": "https://www.seriehd.info",
"serietvonline": "https://serietvonline.xyz",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.club",
"streamingaltadefinizione": "https://www.streamingaltadefinizione.space",
"streamking": "http://streamking.cc",
"tantifilm": "https://www.tantifilm.plus",
"toonitalia": "https://toonitalia.org"
}
"direct": {
"altadefinizione01": "https://altadefinizione01.pet",
"animealtadefinizione": "https://www.animealtadefinizione.it",
"animeforce": "https://www.animeforce.it",
"animesaturn": "https://www.animesaturn.cx",
"animeunity": "https://www.animeunity.tv",
"animeworld": "https://www.animeworld.so",
"aniplay": "https://aniplay.co",
"casacinema": "https://casacinema.media",
"cb01anime": "https://cineblog01.red",
"cinemalibero": "https://cinemalibero.cafe",
"cinetecadibologna": "http://cinestore.cinetecadibologna.it",
"dinostreaming": "https://dinostreaming.it",
"discoveryplus": "https://www.discoveryplus.com",
"dreamsub": "https://www.animeworld.so",
"eurostreaming": "https://eurostreaming.diy",
"eurostreaming_actor": "https://eurostreaming.futbol",
"filmstreaming": "https://film-streaming-ita.cam",
"guardaseriecam": "https://guardaserie.kitchen",
"hd4me": "https://hd4me.net",
"ilcorsaronero": "https://ilcorsaronero.link",
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.foo",
"italiafilm": "https://italia-film.biz",
"mediasetplay": "https://mediasetinfinity.mediaset.it",
"mondoserietv": "http://ww25.mondoserietv.club/?subid1=20230304-0434-261c-9cb0-a0044930e0a9",
"paramount": "https://www.mtv.it",
"piratestreaming": "https://piratestreaming.design",
"plutotv": "https://pluto.tv",
"raiplay": "https://www.raiplay.it",
"serietvu": "http://ww1.serietvu.live/?sub1=47fb879a-5325-11ee-94a7-cc35006f53d1",
"streamingcommunity": "https://streamingcommunity.buzz",
"streamingita": "https://streamingita.digital",
"tantifilm": "https://tantifilm.hair",
"toonitalia": "https://toonitalia.xyz"
},
"findhost": {
"altadefinizione": "https://altadefinizione.nuovo.live",
"altadefinizionecommunity": "https://altaregistrazione.net",
"animealtadefinizione": "https://www.animealtadefinizione.it",
"cineblog01": "https://cb01.uno",
"filmpertutti": "https://filmpertuttiii.nuovo.live"
}
}

107
channels/0example.json.txt Normal file
View File

@@ -0,0 +1,107 @@
Rev:0.2
Update: 03-10-2019
#####################
Promemoria da cancellare pena la non visibilità del canale in KOD!!
#####################
le voci in settings sono state inserite per l'unico scopo
di velocizzare la scrittura del file
Vanno lasciate solo quelle voci il cui funzionamento sul
canale non vanno attivate.
"not_active": ["include_in_newest"], VA INSERITO nei canali che NON hanno nessuna voce newest.
Ovviamente va mantenuto tutto il codice di quell'id tra le {}
se vanno cancellati tutti deve rimanere la voce:
"settings": []
##################### Cancellare fino a qui!
{
"id": "nome del file .json",
"name": "Nome del canale visualizzato in KOD",
"language": ["ita", "sub-ita"],
"active": false,
"thumbnail": "",
"banner": "",
"categories": ["movie", "tvshow", "anime", "vos", "documentary"],
"not_active": ["include_in_newest"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "@70728",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 2,
"enabled": false,
"visible": "eq(-1,false)",
"lvalues": [ "3", "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "@30019",
"default": 0,
"enabled": false,
"visible": false,
"lvalues": ["Non Filtrare"]
}
],
"renumber": [
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": false,
"visible": "eq(-1,false)"
}
]
}

269
channels/0example.py.txt Normal file
View File

@@ -0,0 +1,269 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per 'idcanale nel json'
# By: pincopallo!
# Eventuali crediti se vuoi aggiungerli
# ------------------------------------------------------------
# Rev: 0.2
# Update 12-10-2019
# fix:
# 1. aggiunto pagination e sistemate alcune voci
# 2. modificato problemi in eccezioni
# 3. aggiunta la def check
# 4. modifica alla legenda e altre aggiunte
# Questo vuole solo essere uno scheletro per velocizzare la scrittura di un canale.
# La maggior parte dei canali può essere scritta con il decoratore.
# I commenti sono più un promemoria... che una vera e propria spiegazione!
# Niente di più.
# Ulteriori informazioni sono reperibili nel wiki:
# https://github.com/kodiondemand/addon/wiki/decoratori
"""
Questi sono commenti per i beta-tester.
Su questo canale, nella categoria 'Ricerca Globale'
non saranno presenti le voci 'Aggiungi alla Videoteca'
e 'Scarica Film'/'Scarica Serie', dunque,
la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
Novità. Indicare in quale/i sezione/i è presente il canale:
- Nessuna, film, serie, anime...
Avvisi:
- Eventuali avvisi per i tester
Ulteriori info:
"""
# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato ove occorre,
# ma fare PULIZIA quando si è finito di testarlo
# Qui gli import
#import re
# per l'uso dei decoratori, per i log, e funzioni per siti particolari
from core import support
# in caso di necessità
#from core import scrapertools, httptools, servertools, tmdb
from core.item import Item # per newest
#from lib import unshortenit
##### fine import
# se il sito ha un link per ottenere l'url corretto in caso di oscuramenti
# la funzione deve ritornare l'indirizzo corretto, verrà chiamata solo se necessario (link primario irraggiungibile)
def findhost(url):
    """Discover the site's current domain by inspecting its redirect target.

    The request is made with redirects disabled so the 'location' header can
    be read directly.  Some blocked sites bounce through a Google
    'site:' search URL; in that case the wrapper is stripped off.
    """
    headers = httptools.downloadpage(url, follow_redirects=False).headers
    location = headers['location']
    if 'google' in location:
        # redirect goes through a Google search page: unwrap the real host
        return location.replace('https://www.google.it/search?q=site:', '')
    return location
# se si usa findhost metti in channels.json l'url del sito che contiene sempre l'ultimo dominio
host = config.get_channel_url(findhost)
# se non si usa metti direttamente l'url finale in channels.json
host = config.get_channel_url()
headers = [['Referer', host]]
### fine variabili
#### Inizio delle def principali ###
@support.menu
def mainlist(item):
    """Template main menu, consumed by the @support.menu decorator.

    The decorator reads the local names returned via locals(): ``film``,
    ``tvshow``, ``anime`` are section definitions whose entries are
    (label, [url, action, args, contentType]) tuples; ``search`` enables the
    search entries; ``top``/``nome`` are extra custom sections.
    Delete the sections your channel does not need.
    """
    support.info(item)
    # Entry order matters.
    # FILM section: first element is the section url (ideally the latest-additions page)
    film = ['',
            # (label, [url, action, args, contentType])
            ('Al Cinema', ['', 'peliculas', '']),
            ('Generi', ['', 'genres', 'genres']),
            ('Per Lettera', ['', 'genres', 'letters']),
            ('Anni', ['', 'genres', 'years']),
            ('Qualità', ['', 'genres', 'quality']),
            ('Mi sento fortunato', ['', 'genres', 'lucky']),
            ('Popolari', ['', 'peliculas', '']),
            ('Sub-ITA', ['', 'peliculas', ''])
            ]
    # SERIE section: first element is the series index url
    tvshow = ['',
              ('Novità', ['', '', '']),
              ('Per Lettera', ['', 'genres', 'letters']),
              ('Per Genere', ['', 'genres', 'genres']),
              ('Per anno', ['', 'genres', 'years'])
              ]
    # ANIME section: first element is the anime index url
    anime = ['',
             ('Novità', ['', '', '']),
             ('In Corso', ['', '', '', '']),
             ('Ultimi Episodi', ['', '', '', '']),
             ('Ultime Serie', ['', '', '', ''])
             ]
    # If 'search' is missing, the decorator generates one search entry per
    # section (e.g. "Cerca Film...").
    search = ''  # leave '' if the search function needs nothing else
    # Entry shown FIRST in the KOD menu.
    # fix: the original template lines were syntactically invalid
    # ("top = ([ '' ['', ...])"); a valid list of (label, args) tuples is:
    top = [('', ['', '', '', ''])]
    # Fully custom section, if the standard ones are not enough
    nome = [('', ['', '', '', ''])]
    return locals()
# Legenda known_keys per i groups nei patron
# known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality',
# 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']
# url = link relativo o assoluto alla pagina titolo film/serie
# title = titolo Film/Serie/Anime/Altro
# title2 = titolo dell'episodio Serie/Anime/Altro
# season = stagione in formato numerico
# episode = numero episodio, in formato numerico.
# thumb = linkrealtivo o assoluto alla locandina Film/Serie/Anime/Altro
# quality = qualità indicata del video
# year = anno in formato numerico (4 cifre)
# duration = durata del Film/Serie/Anime/Altro
# genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia
# rating = punteggio/voto in formato numerico
# type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito
# lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA.
# AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!!
@support.scrape
def peliculas(item):
    """Template listing scraper.

    Runs under @support.scrape: the decorator consumes the local variables
    returned via locals() (action, patron, patronBlock, ...), so these local
    names must not be renamed.
    """
    support.info(item)
    #support.dbg()  # uncomment to enable web_pdb
    action = ''
    blacklist = ['']
    patron = r''
    patronBlock = r''
    patronNext = ''
    pagination = ''
    #debug = True  # True to test the regexes against the site
    return locals()
@support.scrape
def episodios(item):
    """Template episode-list scraper.

    Runs under @support.scrape: the decorator consumes the locals() returned
    below, so these local names must not be renamed.
    """
    support.info(item)
    #support.dbg()  # uncomment to enable web_pdb
    action = ''
    blacklist = ['']
    patron = r''
    patronBlock = r''
    patronNext = ''
    pagination = ''
    #debug = True  # True to test the regexes against the site
    return locals()
# Questa def è utilizzata per generare i menu del canale
# per genere, per anno, per lettera, per qualità ecc ecc
@support.scrape
def genres(item):
    """Template scraper used to build the channel sub-menus
    (by genre, by year, by letter, by quality, etc.).

    Runs under @support.scrape: the decorator consumes the locals() returned
    below, so these local names must not be renamed.
    """
    support.info(item)
    #support.dbg()  # uncomment to enable web_pdb
    action = ''
    blacklist = ['']
    patron = r''
    patronBlock = r''
    patronNext = ''
    pagination = ''
    #debug = True  # True to test the regexes against the site
    return locals()
############## Fine ordine obbligato
## Def ulteriori
# per quei casi dove il sito non differenzia film e/o serie e/o anime
# e la ricerca porta i titoli mischiati senza poterli distinguere tra loro
# andranno modificate anche le def peliculas e episodios ove occorre
def check(item):
    """Template helper for sites that mix movies and series in one listing.

    Downloads the page and dispatches to episodios() when the content looks
    like a TV series.  NOTE(review): as written there is no fallthrough for
    the movie case — the function returns None; the channel author is
    expected to complete it (and fill in the empty regex patterns).
    """
    support.info('select --->', item)
    #support.dbg()
    data = httptools.downloadpage(item.url, headers=headers).data
    # data clean-up; if it is not needed, comment out the next 2 lines
    data = re.sub('\n|\t', ' ', data)
    data = re.sub(r'>\s+<', '> <', data)
    block = scrapertools.find_single_match(data, r'')
    if re.findall('', data, re.IGNORECASE):
        support.info('select = ### è una serie ###')
        return episodios(Item(channel=item.channel,
                              title=item.title,
                              fulltitle=item.fulltitle,
                              url=item.url,
                              args='serie',
                              contentType='tvshow',
                              #data1 = data  # uncomment to pass data through without re-downloading
                              ))
############## Fondo Pagina
# da adattare al canale
def search(item, text):
    """Template site-search entry point (adapt the URL/params per channel).

    Delegates to peliculas() for parsing; on any scraper failure it logs the
    exception info and returns [] so the global search is not interrupted.
    """
    support.info('search', item)
    itemlist = []
    text = text.replace(' ', '+')
    item.url = host + '/index.php?do=search&story=%s&subaction=search' % (text)
    # item.contentType is needed by the global search: if the channel serves
    # more than movies, set/discriminate it here instead of this placeholder.
    item.contentType = item.contentType
    try:
        return peliculas(item)
    # Catch everything so one broken channel does not stop the global search
    except:
        import sys
        for line in sys.exc_info():
            # fix: original called bare info(), which is a NameError here
            support.info('search log:', line)
        return []
# da adattare al canale
# inserire newest solo se il sito ha la pagina con le ultime novità/aggiunte
# altrimenti NON inserirlo
def newest(categoria):
    """Template 'latest additions' hook for KoD's news section.

    Only include this function if the site actually has a latest-additions
    page.  It must swallow errors and return [] so the news section keeps
    loading the other channels.
    """
    support.info('newest ->', categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
            item.action = 'peliculas'
            itemlist = peliculas(item)
            # drop the trailing 'next page' entry appended by the scraper
            if itemlist[-1].action == 'peliculas':
                itemlist.pop()
    # Keep going on error so the news section survives a broken channel
    except:
        import sys
        for line in sys.exc_info():
            # fix: original had {0}.format(line) — a set literal, which has
            # no .format method and would raise AttributeError
            support.info('newest log: ', '{0}'.format(line))
        return []
    return itemlist
# da adattare...
# consultare il wiki sia per support.server che ha vari parametri,
# sia per i siti con hdpass
#support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
def findvideos(item):
    """Collect the playable server links for the selected title.

    See the wiki for support.server's parameters and for hdpass-based sites.
    """
    support.info('findvideos ->', item)
    itemlist = support.server(item, headers=headers)
    return itemlist

20
channels/1337x.json Normal file
View File

@@ -0,0 +1,20 @@
{
"id": "1337x",
"name": "1337x",
"language": ["ita", "sub-ita", "eng"],
"active": true,
"thumbnail": "1337x.png",
"banner": "1337x.png",
"categories": ["movie", "tvshow", "torrent"],
"not_active": ["include_in_newest"],
"settings": [
{
"id": "itaSearch",
"type": "bool",
"label": "Cerca contenuti in italiano",
"default": false,
"enabled": true,
"visible": true
}
]
}

193
channels/1337x.py Normal file
View File

@@ -0,0 +1,193 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per 1337x
# ------------------------------------------------------------
import inspect
from core import support
from platformcode import logger, config
# host = support.config.get_channel_url()
host = 'https://www.1337x.to'
@support.menu
def mainlist(item):
    """Channel main menu, consumed by the @support.menu decorator.

    The decorator appears to read the local names returned via locals():
    ``menu`` is a list of (label, [url_path, action, args, contentType])
    entries and ``search`` enables the search entry — do not rename them.
    """
    menu = [('Film ITA {bullet bold}', ['/movie-lib-sort/all/it/popularity/desc/all/1/', 'peliculas', '', 'movie']),
            ('Film {submenu}', ['/movie-library/1/', 'peliculas', 'filter', 'movie']),
            ('Serie TV {bullet bold}', ['/series-library/', 'az', '', 'tvshow'])]
    search = ''
    return locals()
def moviefilter(item):
    """Open a dialog with the site's movie-library filters and return the
    resulting library URL.

    The option lists (genre, year, language, sort field, sort direction) are
    scraped live from the page's <select> widgets.  The raw option *values*
    are stored on *item* so the 'filtered' callback can translate the chosen
    dialog indexes back into URL path segments; the option *labels* are what
    the dialog displays.
    """
    if logger.testMode:
        # deterministic URL for the automated test suite
        return host + '/movie-lib-sort/all/all/score/desc/all/1/'
    from platformcode import platformtools
    item.args = ''
    controls = []
    data = support.match(item).data
    # each dropdown is a <select name="...">; capture its value/label pairs
    patronBlock = r'<select name="{}"[^>]+>(.+?)</select>'
    patron = r'value="([^"]+)">([^<]+)'
    genres = support.match(data, patronBlock=patronBlock.format('genre'), patron=patron).matches
    years = support.match(data, patronBlock=patronBlock.format('year'), patron=patron).matches
    langs = support.match(data, patronBlock=patronBlock.format('lang'), patron=patron).matches
    sorts = support.match(data, patronBlock=patronBlock.format('sortby'), patron=patron).matches
    orders = support.match(data, patronBlock=patronBlock.format('sort'), patron=patron).matches
    # raw option values, indexed by the dialog's list positions
    item.genreValues = [x[0] for x in genres]
    item.yearValues = [x[0] for x in years]
    item.langValues = [x[0] for x in langs]
    item.sortValues = [x[0] for x in sorts]
    item.orderValues = [x[0] for x in orders]
    # human-readable labels shown in the dialog
    genres = [g[1] for g in genres]
    years = [g[1] for g in years]
    langs = [g[1] for g in langs]
    sorts = [g[1] for g in sorts]
    orders = [g[1] for g in orders]
    controls.append({'id': 'lang', 'label': 'Lingua', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': langs, 'default': 0})
    controls.append({'id': 'genre', 'label': 'Genere', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': genres, 'default': 0})
    controls.append({'id': 'year', 'label': 'Anno', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': years, 'default': 0})
    # fix: the next two labels were copy-pasted as 'Anno' (Year) in the
    # original, mislabelling the sort-field and sort-direction controls
    controls.append({'id': 'sort', 'label': 'Ordina per', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': sorts, 'default': 0})
    controls.append({'id': 'order', 'label': 'Ordine', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': orders, 'default': 0})
    return platformtools.show_channel_settings(list_controls=controls, item=item, caption='Filtro', callback='filtered')
def filtered(item, values):
    """Dialog callback: map the chosen option indexes back to the raw values
    stored by moviefilter() and build the filtered library URL.
    """
    selected = [
        item.genreValues[values['genre']],
        item.langValues[values['lang']],
        item.sortValues[values['sort']],
        item.orderValues[values['order']],
        item.yearValues[values['year']],
    ]
    return '{}/movie-lib-sort/{}/{}/{}/{}/{}/1/'.format(host, *selected)
def az(item):
    """Alphabetical index: a '1-9' bucket followed by one entry per letter A-Z."""
    import string
    entries = [item.clone(title='1-9', url=item.url + 'num/1/',
                          action='peliculas', thumbnail=support.thumb('az'))]
    entries.extend(
        item.clone(title=ch.upper(), url=item.url + ch + '/1/',
                   action='peliculas', thumbnail=support.thumb('az'))
        for ch in string.ascii_lowercase
    )
    return entries
def search(item, text):
    """Run a site search for *text*.

    Returns [] on any scraper failure so the global search keeps working.
    """
    support.info('search', text)
    item.args = 'search'
    # optionally bias the query towards Italian releases
    if config.get_setting('itaSearch', channel=item.channel, default=False):
        text = text + ' ita'
    item.url = '{}/search/{}/1/'.format(host, text.replace(' ', '+'))
    try:
        return peliculas(item)
    # Catch everything so a broken channel does not stop the global search
    except:
        import sys
        for exc_part in sys.exc_info():
            support.logger.error("search except: ", exc_part)
        return []
@support.scrape
def peliculas(item):
    """Scrape a listing page: search results, the movie library, or (for
    series) an index whose entries lead to seasons().

    Runs under @support.scrape: the decorator consumes the locals() returned
    below (patron, patronNext, action, itemlistHook, ...), so these local
    names must not be renamed.
    """
    if item.args == 'filter':
        # open the filter dialog; it yields the chosen library URL
        item.url = moviefilter(item)
    if not item.url:
        data = ' '  # dialog cancelled / nothing selected: empty page
    else:
        data = support.match(item).data
    # debug = True
    if item.args == 'search':
        sceneTitle = 'undefined'
        patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<(?:[^>]+>){3,7}(?P<seed>[^<]+)<(?:[^>]+>){6}(?P<size>[^<]+)<span'
        patronNext = r'"([^"]+)">&gt;&gt;'
    elif item.contentType == 'movie':
        patron = r'<img[^>]+data-original="(?P<thumb>[^"]+)(?:[^>]+>){15}(?P<title>[^<]+).*?<p>(?P<plot>[^<]+).*?<a href="(?P<url>[^"]+)'
        patronNext = r'"([^"]+)">&gt;&gt;'
    else:
        action = 'seasons'
        patron = r'<img src="(?P<thumb>[^"]+)(?:[^>]+>){4}\s*<a href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)'
    if (item.args == 'search' or item.contentType != 'movie') and not support.stackCheck(['get_channel_results']):
        # disable the decorator's own pagination and paginate via the hook
        # below instead, unless invoked from the global-search call stack
        patronNext = None
        def itemlistHook(itemlist):
            # the site shows a 'Last' link only when further pages exist
            lastUrl = support.match(data, patron=r'href="([^"]+)">Last').match
            if lastUrl:
                currentPage = support.match(item.url, string=True, patron=r'/(\d+)/').match
                nextPage = int(currentPage) + 1
                support.nextPage(itemlist, item, next_page=item.url.replace('/{}'.format(currentPage), '/{}'.format(nextPage)), function_or_level='peliculas')
            return itemlist
    return locals()
@support.scrape
def seasons(item):
    """List a show's seasons; each entry dispatches to episodios().

    Runs under @support.scrape: the locals() returned below are the
    decorator's interface — do not rename them.
    """
    item.contentType = 'season'
    action = 'episodios'
    patron = r'<li>\s*<a href="(?P<url>[^"]+)[^>]+>\s*<img alt="[^"]*"\ssrc="(?P<thumb>[^"]+)(?:([^>]+)>){2}\s*(?P<title>\w+ (?P<season>\d+))'
    return locals()
@support.scrape
def episodios(item):
    """List a season's episodes.

    Runs under @support.scrape: the locals() returned below are the
    decorator's interface — do not rename them.
    """
    patron = r'<img src="(?P<thumb>[^"]+)(?:[^>]+>){13}\s*(?P<season>\d+)x(?P<episode>\d+)\s*<span class="seperator">(?:[^>]+>){2}\s*<a href="(?P<url>[^"]+)">(?P<title>[^<]+)'
    def itemlistHook(itemlist):
        # the page lists the newest episode first; present ascending order
        itemlist.reverse()
        return itemlist
    return locals()
def findvideos(item):
    """Build the list of playable torrent items for a title.

    Global-search results already point at a torrent page; otherwise the
    detail page's release table (title / seeders / size) is scraped, each
    row becoming a torrent item sorted by seeders then size.

    Fixes vs. original: guessit may report neither 'language' nor
    'subtitle_language' — the subtitle branch now guards against None
    (vars(None) raised TypeError); the size regex is a raw string and a
    non-matching size column no longer crashes float().
    """
    itemlist = []
    item.disableAutoplay = True
    if item.args == 'search':
        itemlist.append(item.clone(server='torrent', action='play'))
    else:
        from lib.guessit import guessit
        items = support.match(item.url, patron=r'<a href="([^"]+)">([^<]+)<(?:[^>]+>){3}([^<]+)<(?:[^>]+>){6}([^<]+)<span').matches
        for url, title, seed, size in items:
            parsedTitle = guessit(title)
            title = support.scrapertools.unescape(parsedTitle.get('title', ''))
            # Derive an audio-language tag; fall back to subtitle language.
            lang = ''
            if parsedTitle.get('language'):
                langs = parsedTitle.get('language')
                if isinstance(langs, list):
                    lang = 'MULTI'
                else:
                    lang = vars(langs).get('alpha3').upper()
            if not (lang.startswith('MUL') or lang.startswith('ITA')):
                subs = parsedTitle.get('subtitle_language')
                if isinstance(subs, list):
                    lang = 'Multi-Sub'
                elif subs:  # guard: guessit may report no subtitle language at all
                    lang = vars(subs).get('alpha3').upper()
            if lang:
                title = '{} [{}]'.format(title, lang)
            # Normalise the size column to MB so items can be sorted by it.
            sizematch = support.match(size, patron=r'(\d+(?:\.\d+)?)\s* (\w+)').match
            sizenumber = float(sizematch[0]) if sizematch else 0.0
            if sizematch and sizematch[1].lower() == 'gb':
                sizenumber = sizenumber * 1024
            itemlist.append(item.clone(title = '{} [{} SEEDS] [{}]'.format(title, seed, size), seed=int(seed), size=sizenumber, url=host + url, server='torrent', action='play'))
        itemlist.sort(key=lambda it: (it.seed, it.size), reverse=True)
    Videolibrary = True if 'movie' in item.args else False
    return support.server(item, itemlist=itemlist, Videolibrary=Videolibrary, Sorted=False)
def play(item):
    """Extract the magnet URI from the torrent page and resolve it to a playable item."""
    from core import servertools
    magnet = support.match(item.url, patron=r'href="(magnet[^"]+)').match
    return servertools.find_video_items(item, data=magnet)

View File

@@ -1,16 +0,0 @@
{
"id": "LIKUOO",
"name": "LIKUOO",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://likuoo.video/files_static/images/logo.jpg",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.likuoo.video'
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    entries = [
        ("Ultimos", "lista", host),
        ("Pornstar", "categorias", host + "/pornstars/"),
        ("Categorias", "categorias", host + "/all-channels/"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in entries]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search and delegate to lista().

    Any exception is logged and swallowed so the global search keeps
    working even if this channel breaks.
    """
    logger.info()
    item.url = host + "/search/?s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
def categorias(item):
    """List category / pornstar entries from the current page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)  # flatten markup so the regex can span lines
    patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedthumbnail = "https:" + scrapedthumbnail  # thumbs are protocol-relative
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    # Pagination: follow the "next" arrow if present.
    next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">&#187;</a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def lista(item):
    """List videos with their runtime prepended to the title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)  # flatten markup so the regex can span lines
    patron = '<div class="item">.*?'
    patron += '<a href="([^"]+)" title="(.*?)">.*?'
    patron += 'src="(.*?)".*?'
    patron += '<div class="runtime">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedtime = scrapedtime.replace("m", ":").replace("s", " ")  # e.g. "12m34s" -> "12:34 "
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " +scrapedtitle
        contentTitle = title
        thumbnail = "https:" + scrapedthumbnail  # thumbs are protocol-relative
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
    # Pagination: follow the "next" arrow if present.
    next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">&#187;</a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def play(item):
    """Extract playable video links from the page and tag them with this item's metadata.

    Fix: the original assigned a dead local (``videochannel=item.channel``);
    the clear intent — matching the adjacent attribute copies — was to set
    the channel on each resolved video item.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel  # was: videochannel=item.channel (unused local)
    return itemlist

View File

@@ -1,16 +0,0 @@
{
"id": "TXXX",
"name": "TXXX",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.txxx.com/images/desktop-logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,151 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.txxx.com'
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    entries = [
        ("Ultimas", "lista", host + "/latest-updates/"),
        ("Mejor valoradas", "lista", host + "/top-rated/"),
        ("Mas popular", "lista", host + "/most-popular/"),
        ("Canal", "catalogo", host + "/channels-list/most-popular/"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in entries]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search and delegate to lista(); errors are logged and
    swallowed so the global search keeps working.

    NOTE(review): the URL is built as "/search/s=%s" with no '?' or '/'
    separator — confirm the site actually accepts this form.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/s=%s" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def catalogo(item):
    """List the site's channels with their video counts."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)  # flatten markup so the regex can span lines
    patron = '<div class="channel-thumb">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<span>(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches:
        scrapedplot = ""
        scrapedurl = host + scrapedurl
        title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"  # append the video count
        itemlist.append( Item(channel=item.channel, action="lista", title=title , url=scrapedurl ,
                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
    # Pagination: follow the "Next Page" button if present.
    next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" ,
                              text_color="blue", url=next_page) )
    return itemlist
def categorias(item):
    """List the category index with per-category video counts."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a class="categories-list__link" href="([^"]+)">.*?'
    patron += '<span class="categories-list__name cat-icon" data-title="([^"]+)">.*?'
    patron += '<span class="categories-list__badge">(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,num in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        scrapedthumbnail = ""
        scrapedplot = ""
        title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"  # append the video count
        itemlist.append( Item(channel=item.channel, action="lista", title=title , url=url ,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
def lista(item):
    """List videos, prefixing the title with duration and an HD badge when present."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)  # flatten markup so the regex can span lines
    patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '</div>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
        contentTitle = scrapedtitle
        # The last capture group holds the badge markup; pull HD flag and duration out of it.
        scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="thumb__hd">(.*?)</span>')
        duration = scrapertools.find_single_match(scrapedtime, '<span class="thumb__duration">(.*?)</span>')
        if scrapedhd != '':
            title = "[COLOR yellow]" +duration+ "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
        else:
            title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
                              plot=plot, contentTitle=title) )
    # Pagination: follow the "Next Page" button if present.
    next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next.*?" href="([^"]+)" title="Next Page"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def play(item):
    """Reassemble the obfuscated direct video URL from the player page.

    The page splits the URL across two JS variables joined by '||':
    part 0 is the encoded URL (see decode_url), part 1 replaces the
    /get_file/<n>/<hash>/ path segment, parts 2 and 3 become the
    lip/lt query parameters.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    video_url += '&' if '?' in video_url else '?'
    video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist
def decode_url(txt):
    """Decode the site's obfuscated video URL.

    Base64-style decoding over a custom 65-character alphabet ending in
    '.,~' — index 64 ('~') acts as the padding marker, like '=' in
    standard base64 — followed by URL-unquoting.  Cyrillic look-alike
    letters (АВСЕМ) are first normalised to their ASCII twins.
    """
    _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    reto = ''; n = 0
    # In the next two lines the Cyrillic АВСЕМ letters occupy 2 bytes each;
    # replace() maps each one to its 1-byte ASCII counterpart (АВСЕМ -> ABCEM).
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    while n < len(txt):
        # Read four 6-bit symbols...
        a = _0x52f6x15.index(txt[n])
        n += 1
        b = _0x52f6x15.index(txt[n])
        n += 1
        c = _0x52f6x15.index(txt[n])
        n += 1
        d = _0x52f6x15.index(txt[n])
        n += 1
        # ...and recombine them into three 8-bit characters (base64 layout).
        a = a << 2 | b >> 4
        b = (b & 15) << 4 | c >> 2
        e = (c & 3) << 6 | d
        reto += chr(a)
        if c != 64: reto += chr(b)  # index 64 ('~') = padding, skip the byte
        if d != 64: reto += chr(e)
    return urllib.unquote(reto)

View File

@@ -1,15 +0,0 @@
{
"id": "absoluporn",
"name": "absoluporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.absoluporn.es/image/deco/logo.gif",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.absoluporn.es'
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    entries = [
        ("Nuevos", "lista", host + "/wall-date-1.html"),
        ("Mas valorados", "lista", host + "/wall-note-1.html"),
        ("Mas vistos", "lista", host + "/wall-main-1.html"),
        ("Mas largos", "lista", host + "/wall-time-1.html"),
        ("Categorias", "categorias", host),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in entries]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search and delegate to lista(); errors are logged and
    swallowed so the global search keeps working."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search-%s-1.html" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List the category links found on the home page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        # Switch the category page to its date-sorted variant.
        scrapedurl = scrapedurl.replace(".html", "_date.html")
        scrapedurl = host +"/" + scrapedurl
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
    return itemlist
def lista(item):
    """List videos with their duration prepended to the title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)  # flatten markup so the regex can span lines
    patron = '<div class="thumb-main-titre"><a href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += '<div class="time">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                              fanart=thumbnail, contentTitle = scrapedtitle))
    # Pagination: the link following the current (highlighted) page number.
    next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
    if next_page:
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                              url=next_page) )
    return itemlist
def play(item):
    """Assemble direct video URLs from the servervideo/path/filee JS variables.

    NOTE(review): the 32-hex segment inserted between path and filee is
    hard-coded — presumably a static access token; confirm it still works.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'servervideo = \'([^\']+)\'.*?'
    patron += 'path = \'([^\']+)\'.*?'
    patron += 'filee = \'([^\']+)\'.*?'
    matches = scrapertools.find_multiple_matches(data, patron)
    for servervideo,path,filee in matches:
        scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist

11
channels/accuradio.json Normal file
View File

@@ -0,0 +1,11 @@
{
"id": "accuradio",
"name": "AccuRadio",
"active": true,
"language": ["*"],
"thumbnail": "accuradio.png",
"banner": "accuradio.png",
"categories": ["music"],
"not_active":["include_in_global_search"],
"settings" :[]
}

88
channels/accuradio.py Normal file
View File

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per accuradio
# ------------------------------------------------------------
import random
from core import httptools, support
from platformcode import logger
host = 'https://www.accuradio.com'
api_url = host + '/c/m/json/{}/'
headers = [['Referer', host]]
def mainlist(item):
    """Build the root menu from AccuRadio's JSON brand catalogue."""
    itemlist = []
    item.action = 'peliculas'
    js = httptools.downloadpage(api_url.format('brands')).json
    # Featured collections, rendered in italics with their channel count.
    for it in js.get('features',[]):
        itemlist.append(
            item.clone(url= '{}/{}'.format(host,it.get('canonical_url','')),
                       title=support.typo(it['name'],'italic') + support.typo(it.get('channels',''),'_ [] color kod')
                       ))
    # Regular brands, rendered as bold bullet entries with their channel count.
    for it in js.get('brands',[]):
        itemlist.append(
            item.clone(url= '{}/{}'.format(host,it.get('canonical_url','')),
                       title=support.typo(it['name'],'bullet bold') + support.typo(it.get('channels',''),'_ [] color kod')
                       ))
    itemlist.append(item.clone(title=support.typo('Cerca...', 'bold color kod'), action='search', thumbnail=support.thumb('search')))
    support.channel_config(item, itemlist)
    return itemlist
@support.scrape
def peliculas(item):
    """List a brand's radio channels (parsed by @support.scrape via locals())."""
    # debug=True
    action = 'playradio'
    patron = r'data-id="(?P<id>[^"]+)"\s*data-oldid="(?P<oldid>[^"]+)".*?data-name="(?P<title>[^"]+)(?:[^>]+>){2}<img src="(?P<thumb>[^"]+)(?:[^>]+>){16}\s*(?P<plot>[^<]+)'
    return locals()
def playradio(item):
    """Fetch the station's current playlist and play it as a Kodi music playlist.

    Fix: the loop no longer rebinds the name ``item`` for the ListItem,
    which shadowed the function parameter from the first iteration on.
    """
    import xbmcgui, xbmc
    tracks = httptools.downloadpage('{}/playlist/json/{}/?ando={}&rand={}'.format(host, item.id, item.oldid, random.random())).json
    playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
    playlist.clear()
    for track in tracks:
        if 'id' in track:
            url = track['primary'] + track['fn'] + '.m4a'
            title = track['title']
            artist = track['track_artist']
            album = track['album']['title']
            year = track['album']['year']
            thumb = 'https://www.accuradio.com/static/images/covers300' + track['album']['cdcover']
            duration = track.get('duration',0)
            info = {'duration':duration,
                    'album':album,
                    'artist':artist,
                    'title':title,
                    'year':year,
                    'mediatype':'music'}
            listitem = xbmcgui.ListItem(title, path=url)
            listitem.setArt({'thumb':thumb, 'poster':thumb, 'icon':thumb})
            listitem.setInfo('music',info)
            playlist.add(url, listitem)
    xbmc.Player().play(playlist)
def search(item, text):
    """Search AccuRadio: artist links first, then matching channels."""
    support.info(text)
    item.url = host + '/search/' + text
    itemlist = []
    try:
        data = support.match(item.url).data
        artists = support.match(data, patronBlock=r'artistResults(.*?)</ul', patron=r'href="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)').matches
        if artists:
            for url, artist in artists:
                itemlist.append(item.clone(title=support.typo(artist,'bullet bold'), thumbnail=support.thumb('music'), url=host+url, action='peliculas'))
        item.data = data
        itemlist += peliculas(item)
    # Keep the global search running if this channel errors out.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
    return itemlist

View File

@@ -1,14 +0,0 @@
{
"id": "alsoporn",
"name": "alsoporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://alsoporn.com/images/alsoporn.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,91 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.alsoporn.com'
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    itemlist = []
    # itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/en/g/All/new/1"))
    itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/g/All/top/1"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search and delegate to lista(); errors are logged and
    swallowed so the global search keeps working."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/=%s/" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List category thumbnails, sorted alphabetically by title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)  # flatten markup so the regex can span lines
    patron = '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)" />'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return sorted(itemlist, key=lambda i: i.title)
def lista(item):
    """List videos with their duration prepended to the title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)  # flatten markup so the regex can span lines
    patron = '<div class="alsoporn_prev">.*?'
    patron += '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
    patron += '<span>([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
    # Pagination: follow the NEXT button if present.
    next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def play(item):
    """Resolve the direct video URL from the detail page.

    Fix: the original downloaded the same page three times and extracted
    two iframe URLs it never used; a single download plus the final
    <source src> extraction returns the exact same item.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)"')
    itemlist.append(item.clone(action="play", title=item.title, fulltitle = item.title, url=scrapedurl2))
    return itemlist

View File

@@ -0,0 +1,11 @@
{
"id": "altadefinizione",
"name": "Altadefinizione",
"language": ["ita", "sub-ita"],
"active": true,
"thumbnail": "altadefinizione.png",
"banner": "altadefinizione.png",
"categories": ["movie", "tvshow", "vos"],
"settings": [],
"not_active": ["include_in_newest"]
}

136
channels/altadefinizione.py Normal file
View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per altadefinizione
# ------------------------------------------------------------
from core import httptools, support, tmdb, scrapertools
from platformcode import config, logger
import re
def findhost(url):
    """Resolve the channel's current domain from the redirect page,
    following a single HTTP redirect if the server sends one."""
    host = support.match(url, patron=r'<h2[^>]+><a href="([^"]+)').match.rstrip('/')
    permUrl = httptools.downloadpage(host, follow_redirects=False, only_headers=True).headers
    if 'location' in permUrl.keys(): # handle redirection
        return permUrl['location']
    return host
host = config.get_channel_url(findhost)
headers = [['Referer', host]]
@support.menu
def mainlist(item):
    """Channel root menu; the @support.menu decorator builds entries from locals()."""
    menu = [('Film',['/category/film/', 'peliculas', '', 'movie']),
            ('Film al cinema {submenu}',['/category/ora-al-cinema/', 'peliculas', '', 'movie']),
            ('Generi',['', 'genres', '', 'undefined']),
            ('Saghe',['', 'genres', 'saghe', 'undefined']),
            ('Serie TV',['/category/serie-tv/', 'peliculas', '', 'tvshow']),
            # ('Aggiornamenti Serie TV', ['/aggiornamenti-serie-tv/', 'peliculas'])  # TODO: broken, needs fixing
            ]
    search = ''
    return locals()
@support.scrape
def genres(item):
    """List genre (or saga) submenus (parsed by @support.scrape via locals())."""
    action = 'peliculas'
    blacklist = ['Scegli il Genere', 'Film', 'Serie Tv', 'Sub-Ita', 'Anime', "Non reperibile", 'Anime Sub-ITA', 'Prossimamente',]
    wantSaga = True if item.args == 'saghe' else False
    patronBlock = r'<div class=\"categories-buttons-container\"(?P<block>.*?)</div>'
    if not wantSaga:  # when sagas are not requested, patronMenu loads icons automatically
        patronMenu = r'<a href=\"(?P<url>https:\/\/.*?)\".*?>(?P<title>.*?)</a>'
    else:  # keep the parent item's icon
        patron = r'<a href=\"(?P<url>https:\/\/.*?)\".*?>(?P<title>.*?)</a>'
    def itemlistHook(itemlist):
        # Split "Saga ..." entries from plain genres; drop 3-letter titles.
        itl = []
        for item in itemlist:
            isSaga = item.fulltitle.startswith('Saga')
            if len(item.fulltitle) != 3:
                if (isSaga and wantSaga) or (not isSaga and not wantSaga):
                    itl.append(item)
        return itl
    return locals()
def search(item, text):
    """Run a site search via peliculas().

    The bare except deliberately swallows every error so a broken channel
    cannot abort the global search.
    """
    item.url = "{}/?{}".format(host, support.urlencode({'s': text}))
    item.args = 'search'
    try:
        return peliculas(item)
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("search except: %s" % exc_part)
        return []
@support.scrape
def peliculas(item):
    """List movies/TV shows (parsed by @support.scrape via locals()).

    Pagination is emulated manually: the next page URL is probed with a
    headers-only request and dropped once the server answers with an
    error status.
    """
    if not item.args == 'search':  # pagination does not work for search results
        if not item.nextpage:
            item.page = 1
        else:
            item.page = item.nextpage
        if not item.parent_url:
            item.parent_url = item.url
        item.nextpage = item.page + 1
        nextPageUrl = "{}/page/{}".format(item.parent_url, item.nextpage)
        resp = httptools.downloadpage(nextPageUrl, only_headers = True)
        if (resp.code > 399): # no more elements
            nextPageUrl = ''
    else:
        action = 'check'  # search results may be movies or shows; check() decides
    patron= r'<article class=\"elementor-post.*?(<img .*?src=\"(?P<thumb>[^\"]+).*?)?<h1 class=\"elementor-post__title\".*?<a href=\"(?P<url>[^\"]+)\" >\s*(?P<title>[^<]+?)\s*(\((?P<lang>Sub-[a-zA-Z]+)*\))?\s*(\[(?P<quality>[A-Z]*)\])?\s*(\((?P<year>[0-9]{4})\))?\s+<'
    return locals()
def episodios(item):
    """Build the episode list by pairing each season block with its episodes."""
    item.quality = ''
    data = item.data if item.data else httptools.downloadpage(item.url).data
    itemlist = []
    # One match per season: it = (block id, season number, optional-group, language).
    for it in support.match(data, patron=[r'div class=\"single-season.*?(?P<id>season_[0-9]+).*?>Stagione:\s(?P<season>[0-9]+).*?(\s-\s(?P<lang>[a-zA-z]+?))?<']).matches:
        # Isolate the season's own <div> block, then match its episodes:
        # ep = (url, thumb, episode number, title).
        block = support.match(data, patron = r'div id=\"'+ it[0] +'\".*?</div').match
        for ep in support.match(block, patron=[r'<li><a href=\"(?P<url>[^\"]+).*?img\" src=\"(?P<thumb>[^\"]+).*?title\">(?P<episode>[0-9]+)\.\s+(?P<title>.*?)</span>']).matches:
            itemlist.append(item.clone(contentType = 'episode',
                                       action='findvideos',
                                       thumb = ep[1],
                                       title = support.format_longtitle(support.cleantitle(ep[3]), season = it[1], episode = ep[2], lang= it[3]),
                                       url = ep[0], data = '')
                            )
    support.check_trakt(itemlist)
    support.videolibrary(itemlist, item)
    if (config.get_setting('downloadenabled')):
        support.download(itemlist, item)
    return itemlist
def check(item):
    """Decide whether the detail page is a TV show (season list present) or a movie."""
    item.data = httptools.downloadpage(item.url).data
    if 'season-details' not in item.data.lower():
        return findvideos(item)
    item.contentType = 'tvshow'
    return episodios(item)
def findvideos(item):
    """Collect server links from a movie page (episode items already carry the player URL)."""
    video_url = item.url
    if item.contentType == 'movie':
        # Movies embed the player either in an iframe or a window.open() call.
        video_url = support.match(item, patron=[r'<div class="video-wrapper">.*?<iframe src=\"(https://.*?)\"',
                                                r'window.open\(\'([^\']+).*?_blank']).match
    if (video_url == ''):
        return []
    itemlist = [item.clone(action="play", url=srv) for srv in support.match(video_url, patron='<div class="megaButton" meta-type="v" meta-link="([^"]+).*?(?=>)>').matches]
    itemlist = support.server(item,itemlist=itemlist)
    return itemlist

View File

@@ -1,62 +1,10 @@
{
"id": "altadefinizione01",
"name": "Altadefinizione01",
"language": ["ita"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
"thumbnail": "altadefinizione01.png",
"banner": "altadefinizione01.png",
"categories": ["movie", "vos"],
"settings": []
}

View File

@@ -2,72 +2,137 @@
# ------------------------------------------------------------
# Canale per altadefinizione01
# ------------------------------------------------------------
from core import servertools, httptools, tmdb, scrapertoolsV2, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
#URL che reindirizza sempre al dominio corrente
#host = "https://altadefinizione01.to"
__channel__ = "altadefinizione01"
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'rapidvideo', 'streamcherry', 'megadrive']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'altadefinizione01')
checklinks_number = config.get_setting('checklinks_number', 'altadefinizione01')
headers = [['Referer', host]]
blacklist_categorie = ['Altadefinizione01', 'Altadefinizione.to']
def mainlist(item):
support.log()
itemlist =[]
support.menu(itemlist, 'Al Cinema','peliculas',host+'/cinema/')
support.menu(itemlist, 'Ultimi Film Inseriti','peliculas',host)
support.menu(itemlist, 'Film Sub-ITA','peliculas',host+'/sub-ita/')
support.menu(itemlist, 'Film Ordine Alfabetico ','AZlist',host+'/catalog/')
support.menu(itemlist, 'Categorie Film','categories',host)
support.menu(itemlist, 'Cerca...','search')
"""
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
Eccezioni note che non superano il test del canale:
return itemlist
Avvisi:
- L'url si prende da questo file.
- è presente nelle novità-> Film.
Ulteriori info:
"""
from core import scrapertools, httptools, support
from core.item import Item
from platformcode import config, logger
def categories(item):
    """Legacy category menu: scrape the kategori_list <ul> into 'peliculas' items."""
    support.log(item)
    itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas')
    return support.thumb(itemlist)
# def findhost(url):
# data = httptools.downloadpage(url).data
# host = scrapertools.find_single_match(data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"')
# return host
def AZlist(item):
    """Legacy A-Z letter menu: scrape the movies-letter block into 'peliculas_list' items."""
    support.log()
    return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list')
host = config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
    """Channel root menu; the @support.menu decorator builds entries from locals().

    Each tuple is (label, [url path, action, args]).
    """
    film = [
        ('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
        ('Ultimi Aggiornati-Aggiunti', ['','peliculas', 'update']),
        ('Generi', ['', 'genres', 'genres']),
        ('Lettera', ['/catalog/a/', 'genres', 'orderalf']),
        ('Anni', ['', 'genres', 'years']),
        ('Sub-ITA', ['/sub-ita/', 'peliculas', 'pellicola'])
    ]
    return locals()
@support.scrape
def peliculas(item):
    """List movies (parsed by @support.scrape via locals()).

    The patron/patronBlock pair is switched on item.args: search results,
    the "latest updated/added" widget, the alphabetical table, or the
    default grid.
    """
    support.info('peliculas', item)
    ## deflang = 'ITA'
    action="findvideos"
    patron = r'<div class="cover boxcaption"> +<h2>\s*<a href="(?P<url>[^"]+)">(?P<title>[^<]+).*?src="(?P<thumb>[^"]+).*?<div class="trdublaj">(?P<quality>[^<]+).*?<span class="ml-label">(?P<year>[0-9]+).*?<span class="ml-label">(?P<duration>[^<]+).*?<p>(?P<plot>[^<]+)'
    patronNext = '<span>\d</span> <a href="([^"]+)">'
    if item.args == "search":
        patronBlock = r'</script> <div class="boxgrid caption">(?P<block>.*)<div id="right_bar">'
    elif item.args == 'update':
        patronBlock = r'<div class="widget-title">Ultimi Film Aggiunti/Aggiornati</div>(?P<block>.*?)<div id="alt_menu">'
        patron = r'style="background-image:url\((?P<thumb>[^\)]+).+?<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> [^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+|N/A)?.+?>.*?(?:>Film (?P<lang>Sub ITA)</a></p> )?<p>(?P<plot>[^<]+)<.*?href="(?P<url>[^"]+)'
        patronNext = '' # this listing has no pagination
    elif item.args == 'orderalf':
        patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"' \
            '.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<' \
            '[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
    else:
        patronBlock = r'<div class="cover_kapsul ml-mask">(?P<block>.*)<div class="page_nav">'
    # debug = True
    return locals()
@support.scrape
def genres(item):
    """Submenus for genres, years or letters (parsed by @support.scrape via locals())."""
    support.info('genres',item)
    action = "peliculas"
    blacklist = ['Altadefinizione01']
    if item.args == 'genres':
        patronBlock = r'<ul class="kategori_list">(?P<block>.*?)<div class="tab-pane fade" id="wtab2">'
        patronMenu = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
    elif item.args == 'years':
        patronBlock = r'<ul class="anno_list">(?P<block>.*?)</li> </ul> </div>'
        patronMenu = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
    elif item.args == 'orderalf':
        patronBlock = r'<div class="movies-letter">(?P<block>.*?)<div class="clearfix">'
        patronMenu = '<a title=.*?href="(?P<url>[^"]+)"><span>(?P<title>.*?)</span>'
    #debug = True
    return locals()
@support.scrape
def orderalf(item):
    """Alphabetical table listing (one <td> row per movie).

    Locals are consumed by @support.scrape; each row links straight to
    findvideos.
    """
    support.info('orderalf',item)
    action = 'findvideos'
    patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
             '.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<'\
             '[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
    patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
    return locals()
def search(item, text):
    """Channel search entry point.

    Builds the site's search URL for *text* and delegates the listing to
    peliculas() with item.args = 'search'. Returns [] on any failure so a
    broken channel never aborts the global search.
    """
    support.info(item, text)
    text = text.replace(" ", "+")  # the endpoint expects '+'-encoded spaces
    item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text)
    item.args = "search"
    try:
        return peliculas(item)
    # Catch any runtime error so the global search keeps going.
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; also dropped the unused `itemlist` local.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("search except: %s" % line)
        return []
def newest(categoria):
# import web_pdb; web_pdb.set_trace()
support.log(categoria)
support.info(categoria)
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host
item.action = "peliculas"
item.contentType = 'movie'
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -77,77 +142,16 @@ def newest(categoria):
return itemlist
def search(item, texto):
    """Search dispatcher: movies via subIta(), TV shows via peliculas_tv().

    Returns [] on error — and also when item.extra matches neither type —
    so callers always receive a list and the global search survives.
    """
    support.log(texto)
    # Fix: spaces must be '+'-encoded for the site's search endpoint, as
    # every other search() in this channel family already does.
    item.url = "%s/index.php?do=search&story=%s&subaction=search" % (
        host, texto.replace(" ", "+"))
    try:
        if item.extra == "movie":
            return subIta(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
    # Keep the global search alive if this channel breaks
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
    # Fix: previously fell through returning None for unknown item.extra.
    return []
def peliculas(item):
    """List movies from the grid page.

    Two passes over the same page: the grid pattern yields url/title/thumb/
    quality, then a site-wide info pattern is searched for the row whose
    link matches this movie to pull year, duration and plot.
    """
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        # Second pass: walk every info row until the one linking to this
        # movie's url is found.
        info = scrapertoolsV2.find_multiple_matches(data, r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')
        infoLabels = {}
        for infoLabels['year'], duration, scrapedplot, checkUrl in info:
            if checkUrl == scrapedurl:
                break
        # NOTE(review): if `info` is empty, `duration`/`scrapedplot` are
        # never bound and the next line raises NameError — confirm the
        # info pattern always matches at least once.
        infoLabels['duration'] = int(duration.replace(' min', '')) * 60  # duration in seconds
        scrapedthumbnail = host + scrapedthumbnail
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        fulltitle = scrapedtitle
        # Tag Sub-ITA entries in green, quality in the skin's accent color.
        if subDiv:
            fulltitle += support.typo(subText + ' _ () color limegreen')
        fulltitle += support.typo(scrapedquality.strip() + ' _ [] color kod')
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 # NOTE(review): `contenType` looks like a typo for
                 # `contentType` — confirm against Item's attribute
                 # handling before changing.
                 contentType=item.contenType,
                 contentTitle=scrapedtitle,
                 contentQuality=scrapedquality.strip(),
                 plot=scrapedplot,
                 title=fulltitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    support.nextPage(itemlist, item, data, '<span>[^<]+</span>[^<]+<a href="(.*?)">')
    return itemlist
def peliculas_list(item):
    """Table ('list') layout: delegate row parsing to support.scrape."""
    support.log()
    item.fulltitle = ''
    row_pattern = r'<a href="([^"]+)" title="([^"]+)".*?> <img.*?src="([^"]+)".*?<td class="mlnh-3">([0-9]{4}).*?mlnh-4">([A-Z]+)'
    return support.scrape(
        item,
        row_pattern,
        ['url', 'title', 'thumb', 'year', 'quality'],
        patron_block=r'<tbody>(.*)<\/tbody>',
    )
def findvideos(item):
    """Resolve playable server links for a movie page.

    If the page embeds a player iframe, follow the iframe URL; otherwise
    let support.server() scan the page itself.
    """
    support.log()
    support.info('findvideos', item)
    page = httptools.downloadpage(item.url).data
    embedded = support.match(page, patron='src="(http[^"]+)" frameborder=\"0\" allow=\"accelerometer; autoplay;').match
    if embedded:
        item.url = embedded
        return support.server(item)
    return support.server(item, headers=headers)
# TODO: check whether the youtube trailer can be re-added
#itemlist = [item.clone(action="play", url=srv[0], quality=srv[1]) for srv in support.match(item, patron='<a href="#" data-link="([^"]+).*?<span class="d">([^<]+)').matches]
#itemlist = support.server(item, itemlist=itemlist, headers=headers)
#return itemlist

View File

@@ -1,76 +0,0 @@
{
"id": "altadefinizione01_club",
"name": "Altadefinizione01 C",
"active": true,
"adult": false,
"language": ["ita"],
"fanart": "https://www.altadefinizione01.vision/templates/Darktemplate/images/logo.png",
"thumbnail": "https://www.altadefinizione01.vision/templates/Darktemplate/images/logo.png",
"banner": "https://www.altadefinizione01.vision/templates/Darktemplate/images/logo.png",
"categories": [
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Cerca informazioni extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_film",
"type": "bool",
"label": "Includi in Novità",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Non filtrare",
"IT"
]
}
]
}

View File

@@ -1,269 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Altadefinizione01C Film -*-
# -*- Riscritto per KOD -*-
# -*- By Greko -*-
# -*- last change: 04/05/2019
from channelselector import get_thumb
from core import httptools, channeltools, scrapertools, servertools, tmdb, support
from core.item import Item
from platformcode import config, logger
from specials import autoplay, filtertools
__channel__ = "altadefinizione01_club"
host = config.get_channel_url(__channel__)
# ======== Funzionalità =============================
checklinks = config.get_setting('checklinks', __channel__)
checklinks_number = config.get_setting('checklinks_number', __channel__)
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream','openload','rapidvideo','streamango'] # per l'autoplay
list_quality = ['default'] #'rapidvideo', 'streamango', 'openload', 'streamcherry'] # per l'autoplay
# =========== home menu ===================
def mainlist(item):
    """
    Build the channel's main menu.
    :param item: menu Item supplied by KOD
    :return: itemlist []
    """
    logger.info("%s mainlist log: %s" % (__channel__, item))
    itemlist = []
    autoplay.init(item.channel, list_servers, list_quality)
    # Main menu entries: (itemlist, label, action, url, extra args)
    support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host, args='pellicola')
    support.menu(itemlist, 'Genere', 'categorie', host, args='genres')
    support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno','years'])
    support.menu(itemlist, 'Per lettera', 'categorie', host + '/catalog/a/', args=['Film per Lettera','orderalf'])
    support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/cinema/', args='pellicola')
    support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/sub-ita/', args='pellicola')
    support.menu(itemlist, 'Cerca film submenu', 'search', host)
    # AutoPlay toggle entry
    autoplay.show_option(item.channel, itemlist)
    return itemlist
# ======== def in ordine di menu ===========================
# =========== def per vedere la lista dei film =============
def peliculas(item):
    """List movies; the HTML block boundary depends on item.args."""
    logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
    itemlist = []
    # download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # pick the listing block for the requested page type
    if item.args != 'orderalf':
        if item.args == 'pellicola' or item.args == 'years':
            bloque = scrapertools.find_single_match(data, '<div class="cover boxcaption">(.*?)<div id="right_bar">')
        elif item.args == "search":
            bloque = scrapertools.find_single_match(data, '<div class="cover boxcaption">(.*?)</a>')
        else:
            bloque = scrapertools.find_single_match(data, '<div class="cover boxcaption">(.*?)<div class="page_nav">')
    # NOTE(review): `bloque` is computed but the match below runs on the
    # full page `data` — confirm whether the block was meant to be used.
    patron = '<h2>.<a href="(.*?)".*?src="(.*?)".*?class="trdublaj">(.*?)<div class="ml-item-hiden".*?class="h4">(.*?)<.*?label">(.*?)</span'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedimg, scrapedqualang, scrapedtitle, scrapedyear in matches:
        # The quality/language badge doubles as the language flag.
        if 'sub ita' in scrapedqualang.lower():
            scrapedlang = 'Sub-Ita'
        else:
            scrapedlang = 'ITA'
        itemlist.append(Item(
            channel=item.channel,
            action="findvideos",
            contentTitle=scrapedtitle,
            fulltitle=scrapedtitle,
            url=scrapedurl,
            infoLabels={'year': scrapedyear},
            # NOTE(review): `contenType` looks like a typo for `contentType`
            # — confirm before changing.
            contenType="movie",
            thumbnail=host+scrapedimg,
            title= "%s [%s]" % (scrapedtitle, scrapedlang),
            language=scrapedlang
        ))
    # The site exposes the release year, so the TMDB title+year lookup is
    # exact; no need to scrape posters/plots from the site (often wrong).
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
    return itemlist
# =========== def pagina categorie ======================================
def categorie(item):
    """Build the category submenu (genres / years / alphabetical letters).

    item.args is either the plain string 'genres' or a [label, kind] pair
    ('years' / 'orderalf'). The site's self-referential "01" entry is
    skipped. Returns an itemlist (possibly empty).
    """
    logger.info("%s mainlist categorie log: %s" % (__channel__, item))
    itemlist = []
    # download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Fix: unknown item.args used to fall through every branch and raise
    # NameError on `bloque`; default to empty and bail out instead.
    bloque = ''
    patron = ''
    if item.args == 'genres':
        bloque = scrapertools.find_single_match(data, '<ul class="kategori_list">(.*?)</ul>')
        patron = '<li><a href="/(.*?)">(.*?)</a>'
    elif item.args[1] == 'years':
        bloque = scrapertools.find_single_match(data, '<ul class="anno_list">(.*?)</ul>')
        patron = '<li><a href="/(.*?)">(.*?)</a>'
    elif item.args[1] == 'orderalf':
        bloque = scrapertools.find_single_match(data, '<div class="movies-letter">(.*)<div class="clearfix">')
        patron = '<a title=.*?href="(.*?)"><span>(.*?)</span>'
    if not patron:
        return itemlist
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapurl, scraptitle in sorted(matches):
        if "01" in scraptitle:
            continue
        scrapurl = host + scrapurl
        # Letters link straight to the alphabetical list; everything else
        # opens a movie listing.
        if item.args[1] != 'orderalf': action = "peliculas"
        else: action = 'orderalf'
        itemlist.append(Item(
            channel=item.channel,
            action= action,
            title = scraptitle,
            url= scrapurl,
            thumbnail = get_thumb(scraptitle, auto = True),
            extra = item.extra,
            ))
    return itemlist
# =========== def pagina lista alfabetica ===============================
def orderalf(item):
    """Alphabetical table listing (one row per movie)."""
    logger.info("%s mainlist orderalf log: %s" % (__channel__, item))
    itemlist = []
    # download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedimg, scrapedyear, scrapedqualang in matches:
        if 'sub ita' in scrapedqualang.lower():
            scrapedlang = 'Sub-ita'
        else:
            scrapedlang = 'ITA'
        itemlist.append(Item(
            channel=item.channel,
            # NOTE(review): this module defines findvideos(), not
            # findvideos_film() — confirm the action name is resolvable.
            action="findvideos_film",
            contentTitle=scrapedtitle,
            fulltitle=scrapedtitle,
            url=scrapedurl,
            infoLabels={'year': scrapedyear},
            # NOTE(review): `contenType` looks like a typo for `contentType`.
            contenType="movie",
            thumbnail=host+scrapedimg,
            title = "%s [%s]" % (scrapedtitle, scrapedlang),
            language=scrapedlang,
            context="buscar_trailer"
            ))
    # The year is available, so the TMDB lookup is precise.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
    return itemlist
# =========== def pagina del film con i server per verderlo =============
def findvideos(item):
    """Build the list of playable servers for a movie page.

    Applies (in order) the optional dead-link check, FilterTools language
    filtering, AutoPlay, and the "add to videolibrary" entry.
    """
    logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
    itemlist = []
    # download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<a href="#" data-link="(.*?)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        logger.info("altadefinizione01_club scrapedurl log: %s" % scrapedurl)
    # Fix: the original called find_video_items() once per match, redoing
    # the identical full-page scan N times; one pass yields the same list.
    if matches:
        try:
            itemlist = servertools.find_video_items(data=data)
            for videoitem in itemlist:
                logger.info("Videoitemlist2: %s" % videoitem)
                videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)
                videoitem.show = item.show
                videoitem.contentTitle = item.contentTitle
                videoitem.contentType = item.contentType
                videoitem.channel = item.channel
                videoitem.year = item.infoLabels['year']
                videoitem.infoLabels['plot'] = item.infoLabels['plot']
        except AttributeError:
            logger.error("data doesn't contain expected URL")
    # Drop dead links when the user enabled the check
    if checklinks:
        itemlist = servertools.check_list_links(itemlist, checklinks_number)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    # Offer the "add to videolibrary" entry
    if item.extra != 'findvideos' and item.extra != "library" and config.get_videolibrary_support() and len(itemlist) != 0 :
        support.videolibrary(itemlist, item)
    return itemlist
# =========== def per cercare film/serietv =============
#http://altadefinizione01.link/index.php?do=search&story=avatar&subaction=search
def search(item, text):
    """Run a site search for *text*; return [] on any error so the
    global search is never interrupted."""
    logger.info("%s mainlist search log: %s %s" % (__channel__, item, text))
    item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text.replace(" ", "+"))
    try:
        return peliculas(item)
    except:
        # Swallow everything: a broken channel must not kill global search.
        import sys
        for line in sys.exc_info():
            logger.error("%s Sono qua: %s" % (__channel__, line))
        return []
# =========== def per le novità nel menu principale =============
def newest(categoria):
    """Return the newest movies for KOD's "Novità" menu; [] on error.

    :param categoria: section requested by KOD (only "film" is handled).
    """
    # Fix: the old format string had four %s placeholders but only two
    # values, so every call raised TypeError before doing any work.
    logger.info("%s mainlist newest log: %s" % (__channel__, categoria))
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)
            # Drop the trailing "next page" pseudo-item, if present.
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()
    # Keep going on error so the Novità screen still renders
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -1,104 +0,0 @@
{
"id": "altadefinizione01_link",
"name": "Altadefinizione01 L",
"active": true,
"adult": false,
"language": ["ita"],
"fanart": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"thumbnail": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"banner": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
"fix" : "reimpostato url e modificato file per KOD",
"change_date": "2019-04-30",
"categories": [
"movie",
"vosi"
],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://altadefinizione01.estate/",
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Cerca informazioni extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Non filtrare",
"ITA",
"vosi"
]
},
{
"id": "perfil",
"type": "list",
"label": "profilo dei colori",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,147 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Altadefinizione01L Film - Serie -*-
# -*- By Greko -*-
import channelselector
from specials import autoplay
from core import servertools, support, jsontools
from core.item import Item
from platformcode import config, logger
__channel__ = "altadefinizione01_link"
# ======== def per utility INIZIO ============================
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
list_quality = ['default']
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]
# =========== home menu ===================
def mainlist(item):
    """
    Build the channel's main menu.
    :param item: menu Item supplied by KOD
    :return: itemlist []
    """
    support.log()
    itemlist = []
    # Main menu entries
    support.menu(itemlist, 'Novità bold', 'peliculas', host)
    support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
    support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
    support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
    support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/film-del-cinema')
    support.menu(itemlist, 'Popolari bold', 'peliculas', host + '/piu-visti.html')
    support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
    support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/film-sub-ita/')
    support.menu(itemlist, 'Cerca film submenu', 'search', host)
    # AutoPlay setup and its menu toggle
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    support.channel_config(item, itemlist)
    return itemlist
# ======== def in ordine di action dal menu ===========================
def peliculas(item):
    """List movies via support.scrape.

    Fix: `support.log` was referenced without parentheses, so the
    statement evaluated the attribute and discarded it — nothing was ever
    logged. Also dropped the redundant `itemlist = []` initializer.
    """
    support.log()
    patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
             '.*?class="ml-item-title">([^<]+)</.*?class="ml-item-label"> (\d{4}) <'\
             '.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.+?"> '\
             '(.+?) </div>.*?class="ml-item-label"> (.+?) </'
    listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']
    patronNext = '<span>\d</span> <a href="([^"]+)">'
    return support.scrape(item, patron=patron, listGroups=listGroups,
                          headers= headers, patronNext=patronNext,
                          action='findvideos')
# =========== def pagina categorie ======================================
def genres(item):
    """Build submenus (genre / year / quality / random titles) via
    support.scrape.

    Fixes: `support.log` was never called (missing parentheses), and an
    unexpected item.args fell through every branch, raising NameError on
    `bloque`; now it returns [] instead.
    """
    support.log()
    action = 'peliculas'
    bloque = None
    if item.args == 'genres':
        bloque = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
    elif item.args == 'years':
        bloque = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
    elif item.args == 'quality':
        bloque = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
    elif item.args == 'lucky':  # random titles, rotated once a day
        bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
        action = 'findvideos'
    if bloque is None:
        return []
    patron = r'<li><a href="([^"]+)">(.*?)<'
    listGroups = ['url','title']
    return support.scrape(item, patron=patron, listGroups=listGroups,
                          headers= headers, patron_block = bloque,
                          action=action)
# =========== def per cercare film/serietv =============
#host+/index.php?do=search&story=avatar&subaction=search
def search(item, text):
    """Query the site's search endpoint; return [] on failure so the
    global search keeps running."""
    support.log()
    query = text.replace(" ", "+")
    item.url = host+"/index.php?do=search&story=%s&subaction=search" % (query)
    try:
        return peliculas(item)
    except:
        # Best effort: log and return an empty result set.
        import sys
        for line in sys.exc_info():
            logger.info("%s mainlist search log: %s" % (__channel__, line))
        return []
# =========== def per le novità nel menu principale =============
def newest(categoria):
    """Feed KOD's "news" section; return [] if anything goes wrong."""
    support.log(categoria)
    item = Item()
    itemlist = []
    try:
        if categoria != "peliculas":
            return itemlist
        item.url = host
        item.action = "peliculas"
        itemlist = peliculas(item)
        # Strip the trailing "next page" pseudo-item.
        if itemlist[-1].action == "peliculas":
            itemlist.pop()
    except:
        # Keep going on error: log and bail out with an empty list.
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def findvideos(item):
    """Hand the page over to support.server() and wire up AutoPlay."""
    support.log()
    servers = support.server(item, headers=headers)
    # FilterTools hook intentionally disabled:
    # servers = filtertools.get_links(servers, item, list_language)
    # Required by AutoPlay
    autoplay.start(servers, item)
    return servers

View File

@@ -1,78 +1,11 @@
{
"id": "altadefinizioneclick",
"name": "AltadefinizioneClick",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneclick.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneciclk.png",
"categories": ["movie","vosi"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://altadefinizione.cloud",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
"active": false,
"language": ["ita","sub-ita"],
"thumbnail": "altadefinizioneclick.png",
"bannermenu": "altadefinizioneciclk.png",
"categories": ["tvshow","movie","vos"],
"not_active":["include_in_newest_series"],
"settings": []
}

View File

@@ -1,112 +1,151 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per altadefinizioneclick
# Canale per Altadefinizione Click
# ----------------------------------------------------------
import re
from core import support
from platformcode import config, logger
from core import servertools, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
def findhost(url):
    """Pull the channel's current domain out of the landing-page button."""
    link_patron = r'<div class="elementor-button-wrapper">\s*<a href="([^"]+)"'
    return support.match(url, patron=link_patron).match
#host = config.get_setting("channel_host", 'altadefinizioneclick')
__channel__ = 'altadefinizioneclick'
host = config.get_channel_url(__channel__)
host = config.get_channel_url(findhost)
if host.endswith('/'):
host = host[:-1]
headers = {'Referer': host, 'x-requested-with': 'XMLHttpRequest'}
order = ['', 'i_piu_visti', 'i_piu_votati', 'i_piu_votati_dellultimo_mese', 'titolo_az', 'voto_imdb_piu_alto'][config.get_setting('order', 'altadefinizionecommunity')]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', "vidoza", "thevideo", "okru", 'youtube']
list_quality = ['1080p']
checklinks = config.get_setting('checklinks', 'altadefinizioneclick')
checklinks_number = config.get_setting('checklinks_number', 'altadefinizioneclick')
headers = [['Referer', host]]
@support.menu
def mainlist(item):
support.log()
itemlist = []
logger.debug(item)
support.menu(itemlist, 'Film', 'peliculas', host + "/nuove-uscite/")
support.menu(itemlist, 'Per Genere submenu', 'menu', host, args='Film')
support.menu(itemlist, 'Per Anno submenu', 'menu', host, args='Anno')
support.menu(itemlist, 'Sub-ITA', 'peliculas', host + "/sub-ita/")
support.menu(itemlist, 'Cerca...', 'search', host, 'movie')
support.aplay(item, itemlist,list_servers, list_quality)
support.channel_config(item, itemlist)
film = ['/type/movie',
('Generi', ['/type/movie', 'genres', 'genres']),
('Anni', ['/type/movie', 'genres', 'year']),]
return itemlist
tvshow = ['/serie-tv/tvshow',
('Generi', ['/serie-tv/tvshow', 'genres', 'genres']),
('Anni', ['/serie-tv/tvshow', 'genres', 'year'])]
return locals()
def search(item, texto):
support.log("search ", texto)
item.extra = 'search'
item.url = host + "/?s=" + texto
logger.debug("search ", texto)
item.args = 'search'
item.url = host + "/search?s={}&f={}&page=1".format(texto, item.contentType)
try:
return peliculas(item)
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.logger.error("%s" % line)
return []
def newest(categoria):
support.log(categoria)
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/nuove-uscite/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
@support.scrape
def genres(item):
logger.debug(item)
data = support.httptools.downloadpage(item.url, cloudscraper=True).data
blacklist= ['Film', 'Serie TV']
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
if item.args == 'genres':
categories ={}
res = support.match(host + '/cerca', patron=r'for="cat-(\d+)[^>]+>([^<]+)').matches
for _id, name in res:
categories[name] = _id
return itemlist
patronBlock = r'{}<span></span>(?P<block>.*?)</ul>\s*</li'.format('Film' if item.contentType == 'movie' else 'Serie TV')
patronMenu = r'<a href="[^"]+">(?P<title>[^<]+)'
def itemHook(it):
it.cat_id = categories[it.fulltitle]
return it
if item.args == 'year':
patron = r'value="(?P<year_id>[^"]+)"[^>]*>(?P<title>\d+)'
patronBlock = r'Anno</option>(?P<block>.*?</select>)'
elif item.args == 'quality':
patronMenu = r'quality/(?P<quality_id>[^"]+)">(?P<title>[^<]+)'
patronBlock = r'Risoluzione(?P<block>.*?)</ul>'
action = 'peliculas'
return locals()
def menu(item):
support.log()
itemlist = support.scrape(item, '<li><a href="([^"]+)">([^<]+)</a></li>', ['url', 'title'], headers, patron_block='<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>', action='peliculas')
return support.thumb(itemlist)
@support.scrape
def peliculas(item):
support.log()
if item.extra == 'search':
patron = r'<a href="([^"]+)">\s*<div class="wrapperImage">(?:<span class="hd">([^<]+)<\/span>)?<img[^s]+src="([^"]+)"[^>]+>[^>]+>[^>]+>([^<]+)<[^<]+>(?:.*?IMDB:\s([^<]+)<\/div>)?'
elements = ['url', 'quality', 'thumb', 'title', 'rating']
item.quality = 'HD'
json = {}
params ={'type':item.contentType, 'anno':item.year_id, 'quality':item.quality_id, 'cat':item.cat_id, 'order':order}
if item.contentType == 'movie':
action = 'findvideos'
else:
patron = r'<img width[^s]+src="([^"]+)[^>]+><\/a>.*?<a href="([^"]+)">([^(?:\]|<)]+)(?:\[([^\]]+)\])?<\/a>[^>]+>[^>]+>[^>]+>(?:\sIMDB\:\s([^<]+)<)?(?:.*?<span class="hd">([^<]+)<\/span>)?\s*<a'
elements =['thumb', 'url', 'title','lang', 'rating', 'quality']
itemlist = support.scrape(item, patron, elements, headers, patronNext='<a class="next page-numbers" href="([^"]+)">')
return itemlist
action = 'episodios'
if not item.page: item.page = 1
try:
# support.dbg()
if item.args in ['search']:
page = support.httptools.downloadpage(item.url, headers=headers)
if page.json:
data = "\n".join(page.json['data'])
else:
data = page.data
else:
params['page'] = item.page
url = '{}/load-more-film?{}'.format(host, support.urlencode(params))
json = support.httptools.downloadpage(url, headers=headers).json
data = "\n".join(json['data'])
except:
data = ' '
patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){3}(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'
# patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){2}(?P<quality>[^<]+)(?:[^>]+>){2}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){3}(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'
# paginazione
if json.get('have_next') or 'have_next_film=true' in data:
def fullItemlistHook(itemlist):
cat_id = support.match(data, patron=r''''cat':"(\d+)"''').match
if cat_id: item.cat_id = cat_id
item.page += 1
support.nextPage(itemlist, item, function_or_level='peliculas')
return itemlist
return locals()
@support.scrape
def episodios(item):
    """List a show's episodes; locals are consumed by @support.scrape.

    Season/episode numbers come from the last two URL path segments,
    which are 0-based on the site (hence the +1).
    """
    logger.debug(item)
    # debug = True
    # NOTE(review): assumes the caller stashed the episode HTML on
    # item.data — confirm against peliculas()/the listing flow.
    data = item.data
    patron = r'class="playtvshow "\s+data-href="(?P<url>[^"]+)'
    def itemHook(it):
        # url ends in .../<season-1>/<episode-1>
        spl = it.url.split('/')[-2:]
        it.infoLabels['season'] = int(spl[0])+1
        it.infoLabels['episode'] = int(spl[1])+1
        # switch to the embeddable player endpoint
        it.url = it.url.replace('/watch-unsubscribed', '/watch-external')
        it.title = '{}x{:02d} - {}'.format(it.contentSeason, it.contentEpisodeNumber, it.fulltitle)
        return it
    return locals()
def findvideos(item):
support.log()
itemlist = []
playWindow = support.match(item, patron='(?:playWindow|iframe)" (?:href|src)="([^"]+)').match
if host in playWindow:
url = support.match(playWindow, patron='allowfullscreen[^<]+src="([^"]+)"').match
else:
url = playWindow
itemlist.append(item.clone(action='play', url=url, quality=''))
itemlist = support.hdpass_get_servers(item)
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item ,'color kod bold')
return itemlist
return support.server(item, itemlist=itemlist)

View File

@@ -0,0 +1,37 @@
{
"id": "altadefinizionecommunity",
"name": "Altadefinizione Community",
"language": ["ita", "sub-ita"],
"active": false,
"thumbnail": "altadefinizionecommunity.png",
"banner": "",
"categories": ["movie", "tvshow", "vos"],
"not_active": ["include_in_newest"],
"settings": [
{
"default": "",
"enabled": true,
"id": "username",
"label": "username",
"type": "text",
"visible": true
},
{
"default": "",
"enabled": true,
"id": "password",
"label": "password",
"type": "text",
"visible": true
},
{
"id": "order",
"type": "list",
"label": "Ordine di Visualizzazione",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Nessuno", "I più visti", "I più votati", "I più votati dell'ultimo mese", "Titolo A-Z", "Voto IMDB più alto"]
}
]
}

View File

@@ -0,0 +1,273 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Altadefinizione Community
from core import support
from lib.fakeMail import Gmailnator
from platformcode import config, platformtools, logger
from core import scrapertools, httptools
def findhost(url):
    """Extract the site's current domain from the landing page's 'Accedi' link."""
    return support.match(url, patron=r'<a href="([^"]+)/\w+">Accedi').match
# Resolve the current site domain (domains rotate frequently).
host = config.get_channel_url(findhost)
register_url = 'https://altaregistrazione.net'
if 'altadefinizionecommunity' not in host:
    # NOTE(review): the refreshed url returned here is discarded; presumably it
    # should be reassigned to `host` — confirm whether get_channel_url caches
    # the re-resolved domain internally.
    config.get_channel_url(findhost, forceFindhost=True)
if host.endswith('/'):
    host = host[:-1]
headers = {'Referer': host}
# Query value for the site's sort order, indexed by the channel setting.
order = ['', 'i_piu_visti', 'i_piu_votati', 'i_piu_votati_dellultimo_mese', 'titolo_az', 'voto_imdb_piu_alto'][config.get_setting('order', 'altadefinizionecommunity')]
@support.menu
def mainlist(item):
    """Channel root menu.

    @support.menu builds the entries from the `film` / `tvshow` locals
    returned via locals() — do not rename them.
    """
    logger.debug(item)
    film = ['/type/movie',
            ('Generi', ['/type/movie', 'genres', 'genres']),
            ('Anni', ['/type/movie', 'genres', 'year']),]
    tvshow = ['/serie-tv/tvshow',
              ('Generi', ['/serie-tv/tvshow', 'genres', 'genres']),
              ('Anni', ['/serie-tv/tvshow', 'genres', 'year'])]
    return locals()
def search(item, text):
    """Run a site search for `text` and render it through peliculas().

    Returns [] on any scrape failure so a broken channel never aborts the
    global search aggregation.
    """
    logger.debug("search ", text)
    # Skip the login/registration dialogs during a global search.
    if not item.globalsearch:
        registerOrLogin()
    query = text.replace(' ', '+')
    item.args = 'search'
    item.url = host + "/search?s={}&f={}".format(query, item.contentType)
    try:
        return peliculas(item)
    except:
        # Log the traceback parts and keep the search alive.
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("%s" % trace_part)
        return []
@support.scrape
def genres(item):
    """Filter menu (genres / years / quality).

    NOTE: @support.scrape introspects the local names (patronBlock,
    patronMenu, action, itemHook, ...) returned via locals() — do not
    rename them.
    """
    registerOrLogin()
    logger.debug(item)
    data = support.httptools.downloadpage(item.url).data
    # Section headers that must not appear as genre entries.
    blacklist= ['Film', 'Serie TV']
    if item.args == 'genres':
        # Map genre display names to their numeric ids (consumed by peliculas).
        categories ={}
        res = support.match(host + '/cerca', patron=r'for="cat-(\d+)[^>]+>([^<]+)').matches
        for _id, name in res:
            categories[name] = _id
        patronBlock = r'{}<span></span>(?P<block>.*?)</ul>\s*</li'.format('Film' if item.contentType == 'movie' else 'Serie TV')
        patronMenu = r'<a href="[^"]+">(?P<title>[^<]+)'
        def itemHook(it):
            # Attach the numeric category id so peliculas can filter by it.
            it.cat_id = categories[it.fulltitle]
            return it
    if item.args == 'year':
        patronMenu = r'value="(?P<year_id>[^"]+)"[^>]*>(?P<title>\d+)'
        patronBlock = r'Anno</option>(?P<block>.*?</select>)'
    elif item.args == 'quality':
        patronMenu = r'quality/(?P<quality_id>[^"]+)">(?P<title>[^<]+)'
        patronBlock = r'Risoluzione(?P<block>.*?)</ul>'
    action = 'peliculas'
    return locals()
@support.scrape
def peliculas(item):
    """List movies / tv shows (search results or filtered browse pages).

    Non-search listings go through the site's `load-more-film` JSON endpoint.
    Locals are consumed by @support.scrape — do not rename them.
    """
    item.quality = 'HD'
    # NOTE(review): shadows the builtin `json`; holds the endpoint's response
    # so the pagination check below can read `have_next`.
    json = {}
    if not item.page: item.page = 1
    params ={'type':item.contentType, 'anno':item.year_id, 'quality':item.quality_id, 'cat':item.cat_id, 'order':order, 'page':item.page}
    # debug = True
    action = 'findvideos' if item.contentType == 'movie' else 'episodios'
    try:
        # support.dbg()
        if item.args in ['search']:
            page = support.httptools.downloadpage(item.url, headers=headers)
            if page.json:
                data = "\n".join(page.json['data'])
            else:
                data = page.data
        else:
            params['page'] = item.page
            url = '{}/load-more-film?{}'.format(host, support.urlencode(params))
            json = support.httptools.downloadpage(url, headers=headers).json
            data = "\n".join(json['data'])
    except:
        # Fall through with a blank page: the patron simply finds nothing.
        data = ' '
    patron = r'wrapFilm"[^>]*>\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){2,6}\s+<h3>(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA -]+))?'
    # patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){3}(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'
    def itemHook(item):
        # The site labels 2K/4K rips the player only serves in HD.
        item.quality = item.quality.replace('2K', 'HD').replace('4K', 'HD')
        item.title = item.title.replace('2K', 'HD').replace('4K', 'HD')
        return item
    # Pagination: JSON flag for browse pages, inline JS flag for search pages.
    if json.get('have_next') or support.match(data, patron=r'have_next_film\s*=\s*true').match:
        def fullItemlistHook(itemlist):
            cat_id = support.match(data, patron=r''''cat':"(\d+)"''').match
            if cat_id: item.cat_id = cat_id
            item.page += 1
            support.nextPage(itemlist, item, function_or_level='peliculas')
            return itemlist
    return locals()
@support.scrape
def episodios(item):
    """Episode list for a tv show; locals are consumed by @support.scrape."""
    registerOrLogin()
    logger.debug(item)
    # debug = True
    data = item.data
    patron = r'class="playtvshow "\s+data-href="(?P<url>[^"]+)'
    def itemHook(it):
        # Season/episode come 0-based from the url tail: .../<season>/<episode>.
        spl = it.url.split('/')[-2:]
        it.infoLabels['season'] = int(spl[0])+1
        it.infoLabels['episode'] = int(spl[1])+1
        # Use the "external" player endpoint, which works without a subscription.
        it.url = it.url.replace('/watch-unsubscribed', '/watch-external')
        it.title = '{}x{:02d} - {}'.format(it.contentSeason, it.contentEpisodeNumber, it.fulltitle)
        return it
    return locals()
def findvideos(item):
    """Resolve the player iframe for `item` and hand it to the server dialog."""
    # Normalise item.url to the external player endpoint (logs in if needed).
    resolve_url(item)
    embed_url = support.match(item.url, patron='allowfullscreen[^<]+src="([^"]+)"', cloudscraper=True).match
    play_item = item.clone(action='play', url=embed_url, quality='')
    return support.server(item, itemlist=[play_item])
def play(item):
    """Produce the final playable url.

    Site-hosted urls are resolved from the jwplayer setup; anything else is
    passed through for the generic server resolvers.
    """
    # External hosters: let the standard resolvers handle them.
    if host not in item.url:
        return [item]
    # Proprietary player: extract the media url from the jwplayer config.
    page_data = support.httptools.downloadpage(item.url, cloudscraper=True).data
    return support.get_jwplayer_mediaurl(page_data, 'Diretto')
def resolve_url(item):
    """Normalise item.url to the '/watch-external' player endpoint.

    Ensures we are logged in first; when the url is still a detail page,
    scrapes the playWindow link out of it. Mutates and returns `item`.
    """
    registerOrLogin()
    already_resolved = '/watch-unsubscribed' in item.url or '/watch-external' in item.url
    if not already_resolved:
        page_data = support.httptools.downloadpage(item.url, cloudscraper=True).data
        matched = support.match(page_data, patron='playWindow" href="([^"]+)')
        # Keep the full page around: episodios() reads it from item.data.
        item.data = matched.data
        item.url = matched.match.replace('/watch-unsubscribed', '/watch-external')
    return item
def login():
    """Log into the site with the configured credentials.

    Returns True when the session is (or already was) authenticated,
    False when the credentials are rejected.
    """
    r = support.httptools.downloadpage(host, cloudscraper=True)
    # CSRF token embedded in the login form; it must be echoed back in the POST.
    token = support.match(r.data, patron=r'name=\s*"_token"\s*value=\s*"([^"]+)').match
    if 'id="logged"' in r.data:
        logger.info('Già loggato')
    else:
        logger.info('Login in corso')
        # Fix: the scraped token was previously discarded and '_token' was sent
        # empty, so the server-side CSRF check could reject every login.
        post = {'_token': token,
                'form_action':'login',
                'email': config.get_setting('username', channel='altadefinizionecommunity'),
                'password':config.get_setting('password', channel='altadefinizionecommunity')}
        r = support.httptools.downloadpage(host + '/login', post=post, headers={'referer': host}, cloudscraper=True)
        if r.code not in [200, 302] or 'Email o Password non validi' in r.data:
            platformtools.dialog_ok('AltadefinizioneCommunity', 'Username/password non validi')
            return False
    return 'id="logged"' in r.data
def registerOrLogin():
    """Ensure a usable account on the site.

    Tries the stored credentials first; otherwise asks the user to either
    enter credentials manually or attempt an automatic registration with a
    throwaway Gmailnator mailbox. Truthy return means authenticated.
    """
    if config.get_setting('username', channel='altadefinizionecommunity') and config.get_setting('password', channel='altadefinizionecommunity'):
        if login():
            return True
    action = platformtools.dialog_yesno('AltadefinizioneCommunity',
                                        'Questo server necessita di un account, ne hai già uno oppure vuoi tentare una registrazione automatica?',
                                        yeslabel='Accedi', nolabel='Tenta registrazione', customlabel='Annulla')
    if action == 1:  # "Accedi": open the channel settings so the user can type credentials
        from specials import setting
        from core.item import Item
        user_pre = config.get_setting('username', channel='altadefinizionecommunity')
        password_pre = config.get_setting('password', channel='altadefinizionecommunity')
        setting.channel_config(Item(config='altadefinizionecommunity'))
        user_post = config.get_setting('username', channel='altadefinizionecommunity')
        password_post = config.get_setting('password', channel='altadefinizionecommunity')
        if user_pre != user_post or password_pre != password_post:
            # Credentials changed: retry the whole flow with the new values.
            return registerOrLogin()
        else:
            # NOTE(review): returns [] (falsy) while sibling failure paths
            # return False — callers only truth-test the result.
            return []
    elif action == 0:  # attempt the automatic registration
        import random
        import string
        logger.debug('Registrazione automatica in corso')
        mailbox = Gmailnator()
        # Random 10-char alphanumeric password.
        randPsw = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(10))
        logger.debug('email: ' + mailbox.address)
        logger.debug('pass: ' + randPsw)
        reg = platformtools.dialog_register(register_url, email=True, password=True, email_default=mailbox.address, password_default=randPsw)
        if not reg:
            return False
        regPost = httptools.downloadpage(register_url, post={'email': reg['email'], 'password': reg['password']}, cloudscraper=True)
        if regPost.url == register_url:
            # Still on the registration page: scrape and show the site's error.
            error = scrapertools.htmlclean(scrapertools.find_single_match(regPost.data, 'Impossibile proseguire.*?</div>'))
            error = scrapertools.unescape(scrapertools.re.sub('\n\s+', ' ', error))
            platformtools.dialog_ok('AltadefinizioneCommunity', error)
            return False
        if reg['email'] == mailbox.address:
            if "L'indirizzo email risulta già registrato" in regPost.data:
                # httptools.downloadpage(baseUrl + '/forgotPassword', post={'email': reg['email']})
                platformtools.dialog_ok('AltadefinizioneCommunity', 'Indirizzo mail già utilizzato')
                return False
            # Wait for the confirmation mail and follow its verification link.
            mail = mailbox.waitForMail()
            if mail:
                checkUrl = scrapertools.find_single_match(mail.body, '<a href="([^"]+)[^>]+>Verifica').replace(r'\/', '/')
                logger.debug('CheckURL: ' + checkUrl)
                httptools.downloadpage(checkUrl, cloudscraper=True)
                config.set_setting('username', mailbox.address, channel='altadefinizionecommunity')
                config.set_setting('password', randPsw, channel='altadefinizionecommunity')
                platformtools.dialog_ok('AltadefinizioneCommunity',
                                        'Registrato automaticamente con queste credenziali:\nemail:' + mailbox.address + '\npass: ' + randPsw)
            else:
                platformtools.dialog_ok('AltadefinizioneCommunity', 'Impossibile registrarsi automaticamente')
                return False
        else:
            # The user changed the mail address: they must verify it themselves.
            platformtools.dialog_ok('AltadefinizioneCommunity', 'Hai modificato la mail quindi KoD non sarà in grado di effettuare la verifica in autonomia, apri la casella ' + reg['email']
                                    + ' e clicca sul link. Premi ok quando fatto')
        logger.debug('Registrazione completata')
    else:
        # "Annulla"
        return False
    return True

View File

@@ -1,70 +0,0 @@
{
"id": "altadefinizionehd",
"name": "AltadefinizioneHD",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",
"bannermenu": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",
"categories": ["tvshow","movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,264 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Altadefinizione HD
# ----------------------------------------------------------
import re
from channelselector import thumb
from core import httptools, scrapertools, servertools, tmdb
from core.item import Item
from platformcode import logger, config
from specials import autoplay
__channel__ = 'altadefinizionehd'
# Current site domain, resolved from the channel configuration.
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
# Servers / qualities registered with the autoplay feature.
list_servers = ['openload']
list_quality = ['default']
def mainlist(item):
    """Build the channel root menu: film listing, genre/year sub-menus,
    fixed category shortcuts and the search entry."""
    logger.info("[altadefinizionehd.py] mainlist")
    # Register the channel's servers/qualities with the autoplay feature.
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [Item(channel=item.channel,
                     action="video",
                     title="[B]Film[/B]",
                     url=host + '/movies/',
                     thumbnail=NovitaThumbnail,
                     fanart=FilmFanart),
                Item(channel=item.channel,
                     action="menu",
                     title="[B] > Film per Genere[/B]",
                     url=host,
                     extra='GENERE',
                     thumbnail=NovitaThumbnail,
                     fanart=FilmFanart),
                Item(channel=item.channel,
                     action="menu",
                     title="[B] > Film per Anno[/B]",
                     url=host,
                     extra='ANNO',
                     thumbnail=NovitaThumbnail,
                     fanart=FilmFanart),
                Item(channel=item.channel,
                     action="video",
                     title="Film Sub-Ita",
                     url=host + "/genre/sub-ita/",
                     thumbnail=NovitaThumbnail,
                     fanart=FilmFanart),
                Item(channel=item.channel,
                     action="video",
                     title="Film Rip",
                     url=host + "/genre/dvdrip-bdrip-brrip/",
                     thumbnail=NovitaThumbnail,
                     fanart=FilmFanart),
                Item(channel=item.channel,
                     action="video",
                     title="Film al Cinema",
                     url=host + "/genre/cinema/",
                     thumbnail=NovitaThumbnail,
                     fanart=FilmFanart),
                Item(channel=item.channel,
                     action="search",
                     extra="movie",
                     title="[COLOR blue]Cerca Film...[/COLOR]",
                     thumbnail=CercaThumbnail,
                     fanart=FilmFanart)]
    autoplay.show_option(item.channel, itemlist)
    # Normalise thumbnails through the shared channelselector helper.
    itemlist = thumb(itemlist)
    return itemlist
def menu(item):
    """List the sub-menu entries for "FILM PER <extra>" (GENERE or ANNO),
    scraped from the site's navigation bar."""
    logger.info("[altadefinizionehd.py] menu")
    data = httptools.downloadpage(item.url, headers=headers).data
    logger.info("[altadefinizionehd.py] DATA"+data)
    # Isolate the <ul class="sub-menu"> belonging to the requested section.
    block_patron = r'<li id="menu.*?><a href="#">FILM PER ' + item.extra + r'<\/a><ul class="sub-menu">(.*?)<\/ul>'
    logger.info("[altadefinizionehd.py] BLOCK"+block_patron)
    block = scrapertools.find_single_match(data, block_patron)
    logger.info("[altadefinizionehd.py] BLOCK"+block)
    entry_patron = r'<li id=[^>]+><a href="(.*?)">(.*?)<\/a><\/li>'
    entries = re.compile(entry_patron, re.DOTALL).findall(block)
    return [Item(channel=item.channel,
                 action='video',
                 title=entry_title,
                 url=entry_url)
            for entry_url, entry_title in entries]
def newest(categoria):
    """Feed the global "Novità" menu; only the 'peliculas' category is handled."""
    logger.info("[altadefinizionehd.py] newest" + categoria)
    results = []
    seed = Item()
    try:
        if categoria == "peliculas":
            seed.url = host
            seed.action = "video"
            results = video(seed)
            # Drop the trailing "next page" entry, recognisable by its action.
            if results[-1].action == "video":
                results.pop()
    # Keep the aggregated newest-scan alive on any scrape failure.
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error("{0}".format(trace_part))
        return []
    return results
def video(item):
    """List the movies on `item.url`, enriching them with TMDB data and
    appending a pagination entry when the site exposes one."""
    logger.info("[altadefinizionehd.py] video")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    logger.info("[altadefinizionehd.py] Data" +data)
    # Archive pages and the home page wrap the movie grid differently.
    if 'archive-content' in data:
        regex = r'<div id="archive-content".*?>(.*?)<div class="pagination'
    else:
        regex = r'<div class="items".*?>(.*?)<div class="pagination'
    block = scrapertools.find_single_match(data, regex)
    logger.info("[altadefinizionehd.py] Block" +block)
    patron = r'<article .*?class="item movies">.*?<img src="([^"]+)".*?<span class="quality">(.*?)<\/span>.*?<a href="([^"]+)">.*?<h4>([^<]+)<\/h4>(.*?)<\/article>'
    matches = re.compile(patron, re.DOTALL).findall(block)
    for scrapedthumb, scrapedquality, scrapedurl, scrapedtitle, scrapedinfo in matches:
        title = scrapedtitle + " [" + scrapedquality + "]"
        # Second pass inside each card for rating / year / plot.
        # NOTE(review): rebinding `patron`/`matches` shadows the outer loop's
        # variables; harmless here but fragile.
        patron = r'IMDb: (.*?)<\/span> <span>(.*?)<\/span>.*?"texto">(.*?)<\/div>'
        matches = re.compile(patron, re.DOTALL).findall(scrapedinfo)
        logger.info("[altadefinizionehd.py] MATCHES" + str(matches))
        for rating, year, plot in matches:
            infoLabels = {}
            infoLabels['Year'] = year
            infoLabels['Rating'] = rating
            infoLabels['Plot'] = plot
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="movie",
                     title=title,
                     fulltitle=scrapedtitle,
                     infoLabels=infoLabels,
                     url=scrapedurl,
                     thumbnail=scrapedthumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    patron = '<a class='+ "'arrow_pag'" + ' href="([^"]+)"'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="video",
                 title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail=thumb()))
    return itemlist
def search(item, texto):
    """Build the search url for `texto` and delegate to search_page()."""
    logger.info("[altadefinizionehd.py] init texto=[" + texto + "]")
    item.url = "{}/?s={}".format(host, texto)
    return search_page(item)
def search_page(item):
    """Render one page of search results, with TMDB enrichment and a
    pagination entry when an 'arrow_pag' link is present."""
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<img src="([^"]+)".*?.*?<a href="([^"]+)">(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    patron = '<a class='+ "'arrow_pag'" + ' href="([^"]+)"'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="search_page",
                 title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail=thumb()))
    return itemlist
def findvideos(item):
    """List the dooplay player options for a movie page.

    Each option carries the ajax parameters (post/nume/type) that play()
    echoes back to /wp-admin/admin-ajax.php to resolve the real stream url.
    """
    data = httptools.downloadpage(item.url).data
    patron = r"<li id='player-.*?'.*?class='dooplay_player_option'\sdata-type='(.*?)'\sdata-post='(.*?)'\sdata-nume='(.*?)'>.*?'title'>(.*?)</"
    matches = re.compile(patron, re.IGNORECASE).findall(data)
    itemlist = []
    for scrapedtype, scrapedpost, scrapednume, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 fulltitle=item.title + " [" + scrapedtitle + "]",
                 show=scrapedtitle,
                 title=item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]",
                 url=host + "/wp-admin/admin-ajax.php",
                 post=scrapedpost,
                 server=scrapedtitle,
                 nume=scrapednume,
                 type=scrapedtype,
                 extra=item.extra,
                 folder=True))
    autoplay.start(itemlist, item)
    return itemlist
def play(item):
    """Resolve the dooplay ajax player to concrete video items.

    POSTs the parameters collected by findvideos() to admin-ajax.php, pulls
    the iframe url out of the response and lets servertools identify the
    hoster. Returns [] when no iframe is found.
    """
    # Fix: `urllib.urlencode` is Python 2 only; fall back to urllib.parse on 3.
    try:
        from urllib import urlencode
    except ImportError:
        from urllib.parse import urlencode
    payload = urlencode({'action': 'doo_player_ajax', 'post': item.post, 'nume': item.nume, 'type': item.type})
    data = httptools.downloadpage(item.url, post=payload).data
    patron = r"<iframe.*src='(([^']+))'\s"
    matches = re.compile(patron, re.IGNORECASE).findall(data)
    # Fix: guard the empty-match case instead of raising IndexError.
    if not matches:
        return []
    url = matches[0][0].strip()
    data = httptools.downloadpage(url, headers=headers).data
    return servertools.find_video_items(data=data)
# Static artwork urls and labels referenced by the menu builders above.
NovitaThumbnail = "https://superrepo.org/static/images/icons/original/xplugin.video.moviereleases.png.pagespeed.ic.j4bhi0Vp3d.png"
GenereThumbnail = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
FilmFanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
ListTxt = "[COLOR orange]Torna a video principale [/COLOR]"
# Localized "next page" label.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
thumbnail = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"

View File

@@ -1,15 +0,0 @@
{
"id": "analdin",
"name": "analdin",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.analdin.com/images/logo-retina.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,113 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'https://www.analdin.com/es'
def mainlist(item):
    """Root menu: fixed listing entries plus the search entry."""
    logger.info()
    sections = [("Nuevas", "lista", host + "/más-reciente/"),
                ("Mas Vistas", "lista", host + "/más-visto/"),
                ("Mejor valorada", "lista", host + "/mejor-valorado/"),
                ("Canal", "catalogo", host),
                ("Categorias", "categorias", host + "/categorías/")]
    itemlist = [Item(channel=item.channel, title=section_title, action=section_action, url=section_url)
                for section_title, section_action, section_url in sections]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Build the search url for `texto` and delegate to lista().

    Returns [] on any error so the global search aggregation survives.
    """
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def catalogo(item):
    """List the site's "Canales" index, with pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Limit the scrape to the channel block between the two popup headers.
    data = scrapertools.find_single_match(data,'<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>')
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
def categorias(item):
    """List the category index, sorted alphabetically by title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
    patron += 'src="([^"]+)".*?'
    patron += '<div class="videos">([^"]+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        # Show the per-category video count next to the title.
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return sorted(itemlist, key=lambda i: i.title)
def lista(item):
    """List the videos on a result/listing page, with pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="popup-video-link" href="([^"]+)".*?'
    patron += 'thumb="([^"]+)".*?'
    patron += '<div class="duration">(.*?)</div>.*?'
    patron += '<strong class="title">\s*([^"]+)</strong>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtime,scrapedtitle in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        # Prefix the duration so it is visible in the listing.
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                              fanart=thumbnail, contentTitle = title))
    next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                              url=next_page) )
    return itemlist
def play(item):
    """Extract the direct video url(s) from the player page."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = "video_url: '([^']+)'"
    found_urls = re.compile(patron, re.DOTALL).findall(data)
    return [item.clone(action="play", title=video_url, fulltitle=item.title, url=video_url)
            for video_url in found_urls]

View File

@@ -0,0 +1,21 @@
{
"id": "animealtadefinizione",
"name": "AnimealtAdefinizione",
"active": false,
"language": ["ita", "sub-ita"],
"thumbnail": "animealtadefinizione.png",
"banner": "animealtadefinizione.png",
"categories": ["anime", "sub-ita"],
"default_off": ["include_in_newest"],
"settings": [
{
"id": "perpage",
"type": "list",
"label": "Elementi per pagina",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": ["20","30","40","50","60","70","80","90","100"]
}
]
}

View File

@@ -0,0 +1,134 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per animealtadefinizione
# ----------------------------------------------------------
from core import support
# Current site domain, resolved from the channel configuration.
host = support.config.get_channel_url()
headers = [['Referer', host]]
# Page sizes offered by the "perpage" setting (stored as a list index).
perpage_list = ['20','30','40','50','60','70','80','90','100']
perpage = perpage_list[support.config.get_setting('perpage' , 'animealtadefinizione')]
# Episode-row pattern shared by episodios() and findvideos(): title + stream link.
epPatron = r'<td>\s*(?P<title>[^<]+)[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img[^>]+/Streaming'
@support.menu
def mainlist(item):
    """Root menu; the `anime` local is consumed by @support.menu — do not rename it."""
    anime=['/anime/',
           ('Tipo',['', 'menu', 'Anime']),
           ('Anno',['', 'menu', 'Anno']),
           ('Genere', ['', 'menu','Genere']),
           ('Ultimi Episodi',['', 'peliculas', 'last'])]
    return locals()
@support.scrape
def menu(item):
    """Sub-menu (Tipo/Anno/Genere) scraped from the site's navigation bar.

    Locals are consumed by @support.scrape — do not rename them.
    """
    action = 'peliculas'
    # Block is the <ul class="sub-menu"> under the category link named in item.args.
    patronBlock= r'<a href="' + host + r'/category/' + item.args.lower() + r'/">' + item.args + r'</a>\s*<ul class="sub-menu">(?P<block>.*?)</ul>'
    patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
    return locals()
def search(item, texto):
    """Run a site search via peliculas(); returns [] on any scrape failure."""
    support.info(texto)
    item.search = texto
    try:
        return peliculas(item)
    except:
        # A broken channel must not abort the global search aggregation.
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("%s" % trace_part)
        return []
def newest(categoria):
    """Feed the global "Novità" menu; only the 'anime' category is handled.

    Always returns a list (empty on error or for unknown categories).
    """
    support.info(categoria)
    item = support.Item()
    try:
        if categoria == "anime":
            item.url = host
            item.args = "last"
            return peliculas(item)
    except:
        # Keep the aggregated newest-scan alive even if this channel breaks.
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
    # Fix: previously fell off the end and returned None for categories other
    # than "anime"; callers expect a list.
    return []
@support.scrape
def peliculas(item):
    """List anime shows/movies (or latest episodes for args == 'last') via the
    site's WordPress ajax sorter endpoint.

    Locals are consumed by @support.scrape — do not rename them.
    """
    if '/movie/' in item.url:
        item.contentType = 'movie'
        action='findvideos'
    elif item.args == 'last':
        item.contentType = 'episode'
        action='episodios'
    else:
        item.contentType = 'tvshow'
        action='episodios'
    # Search uses the generic 's' query; category browsing filters by the
    # slug taken from the category url.
    if item.search:
        query = 's'
        searchtext = item.search
    else:
        query='category_name'
        searchtext = item.url.split('/')[-2]
    if not item.pag: item.pag = 1
    # debug = True
    anime = True
    # NOTE(review): 'rating=1sorter=recent' looks like a missing '&' between
    # parameters — confirm against the site's ajax contract before changing.
    data = support.match(host + '/wp-admin/admin-ajax.php', post='action=itajax-sort&loop=main+loop&location=&thumbnail=1&rating=1sorter=recent&columns=4&numarticles='+perpage+'&paginated='+str(item.pag)+'&currentquery%5B'+query+'%5D='+searchtext).data.replace('\\','')
    patron = r'<a href="(?P<url>[^"]+)"><img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)" class="[^"]+" alt="" title="(?P<title>[^"]+?)\s+(?P<type>Movie)?\s*(?P<lang>Sub Ita|Ita)?\s*[sS]treaming'
    typeContentDict = {'movie':['movie']}
    typeActionDict = {'findvideos':['movie']}
    def itemHook(item):
        # Strip the episode slug so the url points at the show page.
        item.url = support.re.sub('episodio-[0-9-]+', '', item.url)
        return item
    def itemlistHook(itemlist):
        if item.search:
            # Search also returns single-episode hits; keep only show pages.
            itemlist = [ it for it in itemlist if ' Episodio ' not in it.title ]
        if len(itemlist) == int(perpage):
            # A full page implies there may be more: append a "next page" entry.
            item.pag += 1
            itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), action='peliculas'))
        return itemlist
    return locals()
@support.scrape
def episodios(item):
    """Episode table of a show page, paginated by the perpage setting.

    Locals are consumed by @support.scrape — do not rename them.
    """
    anime = True
    # debug = True
    pagination = int(perpage)
    patron = epPatron
    return locals()
def findvideos(item):
    """Collect the playable links for a movie page or a single episode."""
    found = []
    if item.contentType != 'movie':
        # Episode: item.url is already the streaming page.
        get_video_list(item, item.url, support.config.get_localized_string(30137), found)
    else:
        # Movie: the page lists its stream(s) in the same table episodios uses.
        for part_title, part_url in support.match(item, patron=epPatron).matches:
            # support.dbg()
            get_video_list(item, part_url, part_title, found)
    return support.server(item, itemlist=found)
def get_video_list(item, url, title, itemlist):
    """Append a playable clone of `item` for `url` to `itemlist` (also returned).

    VVVVID links go to the dedicated server; everything else is resolved by
    following redirects and reading the `file=` query argument.
    """
    if 'vvvvid' in url:
        itemlist.append(item.clone(title='VVVVID', url=url, server='vvvvid', action='play'))
    else:
        from requests import get
        if not url.startswith('http'): url = host + url
        # Follow redirects, then pull the real stream out of the final url.
        url = support.match(get(url).url, string=True, patron=r'file=([^$]+)').match
        if 'http' not in url: url = 'http://' + url
        itemlist.append(item.clone(title=title, url=url, server='directo', action='play'))
    return itemlist

View File

@@ -2,19 +2,18 @@
"id": "animeforce",
"name": "AnimeForce",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
"banner": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
"active": false,
"thumbnail": "animeforce.png",
"banner": "animeforce.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"label": "Includi in Ricerca Globale",
"default": false,
"enabled": false,
"visible": false
"visible": true
},
{
"id": "include_in_newest_anime",
@@ -25,12 +24,37 @@
"visible": true
},
{
"id": "include_in_newest_italiano",
"id": "checklinks",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
}
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "autorenumber",
"type": "bool",
"label": "@70712",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "autorenumber_mode",
"type": "bool",
"label": "@70688",
"default": false,
"enabled": true,
"visible": "eq(-1,true)"
}
]
}

View File

@@ -1,505 +1,162 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per http://animeinstreaming.net/
# Canale per AnimeForce
# ------------------------------------------------------------
import re
import urllib
import urlparse
from core import httptools, scrapertools, servertools, tmdb
from core.item import Item
from platformcode import config, logger
from servers.decrypters import adfly
__channel__ = "animeforge"
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
from core import support
host = support.config.get_channel_url()
headers = [['Referer', host]]
PERPAGE = 20
# -----------------------------------------------------------------
@support.menu
def mainlist(item):
log("mainlist", "mainlist", item.channel)
itemlist = [Item(channel=item.channel,
action="lista_anime",
title="[COLOR azure]Anime [/COLOR]- [COLOR lightsalmon]Lista Completa[/COLOR]",
url=host + "/lista-anime/",
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="animeaggiornati",
title="[COLOR azure]Anime Aggiornati[/COLOR]",
url=host,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="ultimiep",
title="[COLOR azure]Ultimi Episodi[/COLOR]",
url=host,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca ...[/COLOR]",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
anime = ['/anime',
('In Corso',['/anime/anime-status/in-corso/', 'peliculas', 'status']),
('Completi',['/anime/anime-status/completo/', 'peliculas', 'status']),
('Genere',['/anime', 'submenu', 'genre']),
('Anno',['/anime', 'submenu', 'anime-year']),
('Tipologia',['/anime', 'submenu', 'anime-type']),
('Stagione',['/anime', 'submenu', 'anime-season']),
('Ultime Serie',['/category/anime/articoli-principali/','peliculas','last'])
]
return locals()
# =================================================================
@support.scrape
def submenu(item):
    """Filter menu (genre / year / type / season) built from the site's
    <select> taxonomy widgets.

    Locals are consumed by @support.scrape — do not rename them.
    """
    action = 'peliculas'
    patronBlock = r'data-taxonomy="' + item.args + r'"(?P<block>.*?)</select'
    patronMenu = r'<option class="level-\d+ (?P<url>[^"]+)"[^>]+>(?P<t>[^(]+)[^\(]+\((?P<num>\d+)'
    def itemHook(item):
        # Option values are slugs, not urls: build the taxonomy url from them.
        if not item.url.startswith('http'):
            item.url = host + '/anime/' + item.args + '/' + item.url
        item.title = support.typo(item.t, 'bold')
        return item
    return locals()
# -----------------------------------------------------------------
def newest(categoria):
log("newest", "newest" + categoria)
support.info(categoria)
itemlist = []
item = Item()
item = support.Item()
try:
if categoria == "anime":
item.contentType = 'tvshow'
item.url = host
item.action = "ultimiep"
itemlist = ultimiep(item)
if itemlist[-1].action == "ultimiep":
itemlist.pop()
# Continua la ricerca in caso di errore
item.args = 'newest'
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
support.logger.error("{0}".format(line))
return []
return itemlist
# =================================================================
# -----------------------------------------------------------------
def search(item, texto):
log("search", "search", item.channel)
item.url = host + "/?s=" + texto
def search(item, text):
support.info('search',text)
item.search = text
item.url = host + '/lista-anime/'
item.contentType = 'tvshow'
try:
return search_anime(item)
# Continua la ricerca in caso di errore
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.logger.error("%s" % line)
return []
# =================================================================
# NOTE(review): diff residue — the new @support.scrape-style peliculas() is
# fused with the old imperative search_anime(); statements of both revisions
# are interleaved below. Also note the condition
# `... or "Sub Ita Streaming"` which is always true (non-empty string);
# upstream presumably intended `or "Sub Ita Streaming" in scrapedtitle`.
@support.scrape
def peliculas(item):
search = item.search
anime = True
action = 'check'
# -----------------------------------------------------------------
def search_anime(item):
log("search_anime", "search_anime", item.channel)
itemlist = []
patron = r'<a href="(?P<url>[^"]+)"[^>]+>\s*<img src="(?P<thumb>[^"]+)" alt="(?P<title>.*?)(?: Sub| sub| SUB|")'
data = httptools.downloadpage(item.url).data
if search:
patron = r'<a href="(?P<url>[^"]+)"\s*title="(?P<title>.*?)(?: Sub| sub| SUB|")'
patron = r'<a href="([^"]+)"><img.*?src="([^"]+)".*?title="([^"]+)".*?/>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.args == 'newest': item.action = 'findvideos'
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if "Sub Ita Download & Streaming" in scrapedtitle or "Sub Ita Streaming":
if 'episodio' in scrapedtitle.lower():
itemlist.append(episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail))
else:
scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
cleantitle, eptype = clean_title(scrapedtitle)
patronNext = '<li class="page-item disabled">(?:[^>]+>){4}<a class="page-link" href="([^"]+)'
scrapedurl, total_eps = create_url(scrapedurl, cleantitle)
def itemHook(item):
if 'sub-ita' in item.url:
if item.args != 'newest': item.title = item.title + support.typo('Sub-ITA','_ [] color kod')
item.contentLanguage = 'Sub-ITA'
return item
itemlist.append(
Item(channel=item.channel,
action="episodios",
text_color="azure",
contentType="tvshow",
title=scrapedtitle,
url=scrapedurl,
fulltitle=cleantitle,
show=cleantitle,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next Page
next_page = scrapertools.find_single_match(data, r'<link rel="next" href="([^"]+)"[^/]+/>')
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="search_anime",
text_bold=True,
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
return locals()
# =================================================================
# -----------------------------------------------------------------
def animeaggiornati(item):
    """List recently-updated anime scraped from item.url as tvshow items."""
    log("animeaggiornati", "animeaggiornati", item.channel)
    page = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
    itemlist = []
    for thumb, url, rawtitle in re.findall(patron, page, re.DOTALL):
        title = scrapertools.decodeHtmlentities(rawtitle)
        # Only keep entries that actually offer streaming.
        if 'Streaming' not in title:
            continue
        cleantitle, eptype = clean_title(title)
        # Build the canonical series URL from the episode link.
        url, total_eps = create_url(url, title)
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 text_color="azure",
                 contentType="tvshow",
                 title=cleantitle,
                 url=url,
                 fulltitle=cleantitle,
                 show=cleantitle,
                 thumbnail=thumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def check(item):
    """Route an item by its page type: movie -> findvideos, else episodios."""
    matched = support.match(item, headers=headers, patron=r'Tipologia[^>]+>\s*<a href="([^"]+)"')
    # Keep the downloaded page around so the next step can reuse it.
    item.data = matched.data
    if 'movie' not in matched.match:
        return episodios(item)
    item.contentType = 'movie'
    return findvideos(item)
# =================================================================
# -----------------------------------------------------------------
def ultimiep(item):
    """Scrape the latest-episodes listing into one Item per episode."""
    log("ultimiep", "ultimiep", item.channel)
    page = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
    itemlist = []
    for thumb, url, rawtitle in re.findall(patron, page, re.DOTALL):
        title = scrapertools.decodeHtmlentities(rawtitle)
        if 'Streaming' in title:
            itemlist.append(episode_item(item, title, url, thumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# =================================================================
# -----------------------------------------------------------------
def lista_anime(item):
    """Paginated A-Z anime list; the page number rides in the URL after '{}'."""
    log("lista_anime", "lista_anime", item.channel)
    page_no = 1
    if '{}' in item.url:
        item.url, tail = item.url.split('{}')
        page_no = int(tail)
    html = httptools.downloadpage(item.url).data
    patron = r'<li>\s*<strong>\s*<a\s*href="([^"]+?)">([^<]+?)</a>\s*</strong>\s*</li>'
    matches = re.findall(patron, html, re.DOTALL)
    itemlist = []
    start = (page_no - 1) * PERPAGE
    stop = page_no * PERPAGE
    for idx, (url, rawtitle) in enumerate(matches):
        # Window the full list down to the current page.
        if idx < start:
            continue
        if idx >= stop:
            break
        title = scrapertools.decodeHtmlentities(rawtitle).strip()
        cleantitle, eptype = clean_title(title, simpleClean=True)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 text_color="azure",
                 contentType="tvshow",
                 title=cleantitle,
                 url=url,
                 thumbnail="",
                 fulltitle=cleantitle,
                 show=cleantitle,
                 plot="",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Offer a "next page" entry while a full page was available.
    if len(matches) >= stop:
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="lista_anime",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=item.url + '{}' + str(page_no + 1),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    return itemlist
# =================================================================
# -----------------------------------------------------------------
# NOTE(review): diff residue — the old imperative episodios() (ending at the
# first `return itemlist`) is fused with the tail of the new
# @support.scrape-style version (`patron`/`itemHook`/`return locals()`),
# which is dead code here. Reconcile against upstream before editing.
@support.scrape
def episodios(item):
itemlist = []
anime = True
pagination = 50
data = item.data
data = httptools.downloadpage(item.url).data
patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
vvvvid_videos = False
for scrapedtitle, scrapedurl, scrapedimg in matches:
if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
continue
if 'vvvvid' in scrapedurl.lower():
if not vvvvid_videos: vvvvid_videos = True
itemlist.append(Item(title='I Video VVVVID Non sono supportati', text_color="red"))
continue
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedtitle,
url=urlparse.urljoin(host, scrapedurl),
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=item.plot,
fanart=item.fanart,
thumbnail=item.thumbnail))
# Service commands (videolibrary integration)
if config.get_videolibrary_support() and len(itemlist) != 0 and not vvvvid_videos:
itemlist.append(
Item(channel=item.channel,
title=config.get_localized_string(30161),
text_color="yellow",
text_bold=True,
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
if '<h6>Streaming</h6>' in data:
patron = r'<td style[^>]+>\s*.*?(?:<span[^>]+)?<strong>(?P<title>[^<]+)<\/strong>.*?<td style[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?P<episode>\d+)'
else:
patron = r'<a\s*href="(?P<url>[^"]+)"\s*title="(?P<title>[^"]+)"\s*class="btn btn-dark mb-1">(?P<episode>\d+)'
def itemHook(item):
support.info(item)
if item.url.startswith('//'): item.url= 'https:' + item.url
elif item.url.startswith('/'): item.url= 'https:/' + item.url
return item
action = 'findvideos'
return locals()
# ==================================================================
# -----------------------------------------------------------------
# NOTE(review): diff residue — at least two revisions of findvideos() are
# fused below (duplicate videoitem-fixup loops, statements after returns,
# large commented-out regions, and a trailing data-href branch that belongs
# to the newer revision). Reconcile against upstream before editing.
def findvideos(item):
logger.info("kod.animeforce findvideos")
support.info(item)
itemlist = []
if item.extra:
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'%s(.*?)</tr>' % item.extra)
url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]*>')
if 'vvvvid' in url.lower():
itemlist = [Item(title='I Video VVVVID Non sono supportati', text_color="red")]
return itemlist
if 'http' not in url: url = "".join(['https:', url])
if item.data:
url = support.match(item.data, patron=r'<a\s*href="([^"]+)"\s*title="[^"]+"\s*class="btn btn-dark mb-1">').match
else:
url = item.url
if 'adf.ly' in url:
url = adfly.get_long_url(url)
elif 'bit.ly' in url:
url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")
# if 'adf.ly' in item.url:
# from servers.decrypters import adfly
# url = adfly.get_long_url(item.url)
if 'animeforce' in url:
headers.append(['Referer', item.url])
data = httptools.downloadpage(url, headers=headers).data
itemlist.extend(servertools.find_video_items(data=data))
# elif 'bit.ly' in item.url:
# url = support.httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location")
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
url = url.split('&')[0]
data = httptools.downloadpage(url, headers=headers).data
patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
matches = re.compile(patron, re.DOTALL).findall(data)
headers.append(['Referer', url])
for video in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title,
url=video + '|' + urllib.urlencode(dict(headers)), folder=False))
else:
itemlist.extend(servertools.find_video_items(data=url))
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist
# else:
# url = host
# for u in item.url.split('/'):
# if u and 'animeforce' not in u and 'http' not in u:
# url += '/' + u
# ==================================================================
# if 'php?' in url:
# url = support.httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")
# url = support.match(url, patron=r'class="button"><a href=(?:")?([^" ]+)', headers=headers).match
# else:
# if item.data: url = item.data
# if item.contentType == 'movie': url = support.match()
# url = support.match(url, patron=r'data-href="([^"]+)" target').match
# if not url: url = support.match(url, patron=[r'<source src=(?:")?([^" ]+)',r'name="_wp_http_referer" value="([^"]+)"']).match
# if url.startswith('//'): url = 'https:' + url
# elif url.startswith('/'): url = 'https:/' + url
url = support.match(url, patron=r'data-href="([^"]+)" target').match
if 'vvvvid' in url: itemlist.append(item.clone(action="play", title='VVVVID', url=url, server='vvvvid'))
else: itemlist.append(item.clone(action="play", title=support.config.get_localized_string(30137), url=url, server='directo'))
# =================================================================
# Funzioni di servizio
# -----------------------------------------------------------------
def scrapedAll(url="", patron=""):
    """Download *url* and return every DOTALL match of *patron*."""
    html = httptools.downloadpage(url).data
    matches = re.findall(patron, html, re.DOTALL)
    scrapertools.printMatches(matches)
    return matches
# =================================================================
# -----------------------------------------------------------------
def create_url(url, title, eptype=""):
    """Normalise an episode/series URL.

    Ensures the '-download-streaming' form of the slug, strips the
    episode-number fragment and returns (url, total_eps); total_eps is the
    episode counter found in *title*, empty when *eptype* is empty.
    """
    logger.info()
    if 'download' not in url:
        url = url.replace('-streaming', '-download-streaming')
    total_eps = ""
    if eptype:
        # Episode entry: drop the "(NN-...)" counter and the type prefix.
        total_eps = scrapertools.find_single_match(title.lower(), r'\((\d+)-(?:episodio|sub-ita)\)')
        if total_eps:
            url = url.replace('%s-' % total_eps, '')
        url = re.sub(r'%s-?\d*-' % eptype.lower(), '', url)
    else:
        url = re.sub(r'episodio?-?\d+-?(?:\d+-|)[oav]*', '', url)
    return url.replace('-fine', ''), total_eps
# =================================================================
# -----------------------------------------------------------------
# Strip marketing words ("Streaming", "Download", "Sub Ita", HTML-entity
# leftovers) from a scraped title; unless simpleClean, also detect and
# remove the "Episodio N"/"OAV" part. Returns (cleantitle, eptype).
def clean_title(title, simpleClean=False):
logger.info()
title = title.replace("Streaming", "").replace("&", "")
title = title.replace("Download", "")
title = title.replace("Sub Ita", "")
cleantitle = title.replace("#038;", "").replace("amp;", "").strip()
if '(Fine)' in title:
cleantitle = cleantitle.replace('(Fine)', '').strip() + " (Fine)"
eptype = ""
if not simpleClean:
if "episodio" in title.lower():
eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
# NOTE(review): this re.sub restarts from `title`, discarding the
# "#038;"/"amp;"/"(Fine)" cleanup done above — confirm intentional.
cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
if 'episodio' not in eptype.lower():
cleantitle = re.sub(r'Episodio?\s*\d+\s*(?:\(\d+\)|)\s*[\(OAV\)]*', '', cleantitle).strip()
if '(Fine)' in title:
cleantitle = cleantitle.replace('(Fine)', '')
return cleantitle, eptype
# =================================================================
# -----------------------------------------------------------------
def episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail):
    """Build a findvideos Item for a single scraped episode entry."""
    scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
    cleantitle, eptype = clean_title(scrapedtitle)
    # Normalise the URL and pull out the total-episodes counter, if any.
    scrapedurl, total_eps = create_url(scrapedurl, scrapedtitle, eptype)
    epnumber = ""
    if 'episodio' in eptype.lower():
        epnumber = scrapertools.find_single_match(scrapedtitle.lower(), r'episodio?\s*(\d+)')
    # `extra` is a regex used later to locate this episode's row in the page.
    eptype += ":? %s%s" % (epnumber, (r" \(%s\):?" % total_eps) if total_eps else "")
    extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
    return Item(channel=item.channel,
                action="findvideos",
                contentType="tvshow",
                title=scrapedtitle,
                text_color="azure",
                url=scrapedurl,
                fulltitle=cleantitle,
                extra=extra,
                show=cleantitle,
                thumbnail=scrapedthumbnail)
# =================================================================
# -----------------------------------------------------------------
def scrapedSingle(url="", single="", patron=""):
    """Download *url*, cut the block matched by *single*, apply *patron* to it."""
    html = httptools.downloadpage(url).data
    block = scrapertools.find_single_match(html, single)
    matches = re.findall(patron, block, re.DOTALL)
    scrapertools.printMatches(matches)
    return matches
# =================================================================
# -----------------------------------------------------------------
def Crea_Url(pagina="1", azione="ricerca", categoria="", nome=""):
    """Compose a chiamate.php query URL (azione/cat/nome/pag)."""
    # Example: chiamate.php?azione=ricerca&cat=&nome=&pag=
    url = "%s/chiamate.php?azione=%s&cat=%s&nome=%s&pag=%s" % (host, azione, categoria, nome, pagina)
    log("crea_Url", url)
    return url
# =================================================================
# -----------------------------------------------------------------
def log(funzione="", stringa="", canale=""):
    """Debug-log helper formatted as "[channel].[function] message"."""
    logger.debug("[%s].[%s] %s" % (canale, funzione, stringa))
# =================================================================
# =================================================================
# =================================================================
# service references (default artwork / pager resources)
# -----------------------------------------------------------------
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
# NOTE(review): the next line is a bare `return` at module level — almost
# certainly diff residue from a findvideos() body; it is a SyntaxError here.
return support.server(item, itemlist=itemlist)

View File

@@ -1,62 +0,0 @@
{
"id": "animeleggendari",
"name": "AnimePerTutti",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "animepertutti.png",
"bannermenu": "animepertutti.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare", "IT"]
}
]
}

View File

@@ -1,185 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per animeleggendari
# ------------------------------------------------------------
import re
from core import servertools, httptools, scrapertoolsV2, tmdb, support
from core.item import Item
from core.support import log, menu
from lib.js2py.host import jsfunctions
from platformcode import logger, config
from specials import autoplay, autorenumber
__channel__ = "animeleggendari"
host = config.get_channel_url(__channel__)
# Richiesto per Autoplay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'animeleggendari')
checklinks_number = config.get_setting('checklinks_number', 'animeleggendari')
def mainlist(item):
    """Top-level menu for the animeleggendari channel."""
    log()
    itemlist = []
    # Category entries, in display order.
    for title, action, url in (
            ('Anime Leggendari', 'peliculas', host + '/category/anime-leggendari/'),
            ('Anime ITA', 'peliculas', host + '/category/anime-ita/'),
            ('Anime SUB-ITA', 'peliculas', host + '/category/anime-sub-ita/'),
            ('Anime Conclusi', 'peliculas', host + '/category/serie-anime-concluse/'),
            ('Anime in Corso', 'peliculas', host + '/category/anime-in-corso/'),
            ('Genere', 'genres', host)):
        menu(itemlist, title, action, url)
    menu(itemlist, 'Cerca...', 'search')
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def search(item, texto):
    """Global-search entry point; never raises (errors are logged, [] returned)."""
    log(texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    # Keep the global search alive on scraper errors.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def last_ep(item):
    """Latest-episodes strip parsed out of the tab-content block."""
    log('ANIME PER TUTTI')
    return support.scrape(item,
                          '<a href="([^"]+)">([^<]+)<',
                          ['url', 'title'],
                          patron_block='<ul class="mh-tab-content-posts">(.*?)<\/ul>',
                          action='findvideos')
def newest(categoria):
    """'Novità' integration: latest anime episodes, or [] on scraper error."""
    log('ANIME PER TUTTI')
    log(categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "anime":
            item.url = host
            item.action = "last_ep"
            itemlist = last_ep(item)
            # Drop the trailing pager entry, if the scrape produced one.
            if itemlist[-1].action == "last_ep":
                itemlist.pop()
    # Keep the newest scan alive on scraper errors.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def genres(item):
    """Genre menu scraped from the 'Generi' block, with themed thumbnails."""
    entries = support.scrape(item,
                             '<a href="([^"]+)">([^<]+)<',
                             ['url', 'title'],
                             action='peliculas',
                             patron_block=r'Generi.*?<ul.*?>(.*?)<\/ul>',
                             blacklist=['Contattaci', 'Privacy Policy', 'DMCA'])
    return support.thumb(entries)
def peliculas(item):
    """Series/movie listing: one Item per poster link, language-tagged."""
    log()
    itemlist = []
    blacklist = ['top 10 anime da vedere']
    matches, data = support.match(item, r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')
    for url, rawtitle, thumb in matches:
        title = scrapertoolsV2.decodeHtmlentities(rawtitle.strip()).replace("streaming", "")
        lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")
        lowered = title.lower()
        # OVA/MOVIE entries are played directly; everything else is a series.
        videoType = ''
        if 'movie' in lowered:
            videoType = ' - (MOVIE)'
        if 'ova' in lowered:
            videoType = ' - (OAV)'
        cleantitle = (title.replace(lang, "")
                      .replace('(Streaming & Download)', '')
                      .replace('( Streaming & Download )', '')
                      .replace('OAV', '')
                      .replace('OVA', '')
                      .replace('MOVIE', '')
                      .strip())
        if videoType:
            contentType, action = "movie", "findvideos"
        else:
            contentType, action = "tvshow", "episodios"
        if lowered not in blacklist:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     contentType=contentType,
                     title=support.typo(cleantitle + videoType, 'bold') + support.typo(lang, '_ [] color kod'),
                     fulltitle=cleantitle,
                     show=cleantitle,
                     url=url,
                     thumbnail=thumb))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)
    support.nextPage(itemlist, item, data, r'<a class="next page-numbers" href="([^"]+)">')
    return itemlist
# Episode list: episode 1 is the show page itself, further episodes come
# from the pagination links.
# NOTE(review): `support.typo('Episodio 1 bold')` passes the style inside
# the text argument (elsewhere the style is a second argument) — confirm.
# NOTE(review): the bare `support.videolibrary` near the end is a no-op
# attribute access; upstream presumably meant to call it with arguments.
def episodios(item):
log()
itemlist = []
data = httptools.downloadpage(item.url).data
block = scrapertoolsV2.find_single_match(data, r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')
itemlist.append(
Item(channel=item.channel,
action='findvideos',
contentType='episode',
title=support.typo('Episodio 1 bold'),
fulltitle=item.title,
url=item.url,
thumbnail=item.thumbnail))
if block:
matches = re.compile(r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>', re.DOTALL).findall(data)
for url, number in matches:
itemlist.append(
Item(channel=item.channel,
action='findvideos',
contentType='episode',
title=support.typo('Episodio ' + number,'bold'),
fulltitle=item.title,
url=url,
thumbnail=item.thumbnail))
autorenumber.renumber(itemlist, item)
support.videolibrary
return itemlist
# Resolve server links from the obfuscated `str="..."` page fragments.
# NOTE(review): the two consecutive `data +=` lines look like diff residue
# (old plain append vs. new js2py-unescaped append) — applying both would
# duplicate each fragment; confirm against upstream history.
def findvideos(item):
log()
data = ''
matches = support.match(item, 'str="([^"]+)"')[0]
if matches:
for match in matches:
data += str(jsfunctions.unescape(re.sub('@|g','%', match)))
data += str(match)
log('DATA',data)
if 'animepertutti' in data:
log('ANIMEPERTUTTI!')
else:
data = ''
itemlist = support.server(item,data)
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
return itemlist

View File

@@ -2,69 +2,9 @@
"id": "animesaturn",
"name": "AnimeSaturn",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "animesaturn.png",
"banner": "animesaturn.png",
"categories": ["anime"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://www.animesaturn.com",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
"settings": []
}

View File

@@ -1,379 +1,194 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per AnimeSaturn
# Thanks to 4l3x87
# ----------------------------------------------------------
import re
import urlparse
from core import support
import channelselector
from core import httptools, tmdb, support, scrapertools, jsontools
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay, autorenumber
__channel__ = "animesaturn"
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'fembed', 'animeworld']
list_quality = ['default', '480p', '720p', '1080p']
host = support.config.get_channel_url()
__channel__ = 'animesaturn'
cookie = support.config.get_setting('cookie', __channel__)
headers = {'X-Requested-With': 'XMLHttpRequest', 'Cookie': cookie}
# Extract the site's anti-bot cookie from the page source, persist it in
# the channel settings and rebuild the module-level request headers.
def get_cookie(data):
global cookie, headers
cookie = support.match(data, patron=r'document.cookie="([^\s]+)').match
support.config.set_setting('cookie', cookie, __channel__)
headers = [['Cookie', cookie]]
# Download item.url; when the anti-bot 'ASCookie' challenge page comes back,
# refresh the cookie and retry once (recursively) with the new headers.
def get_data(item):
# support.dbg()
# url = support.match(item.url, headers=headers, follow_redirects=True, only_headers=True).url
data = support.match(item.url, headers=headers, follow_redirects=True).data
if 'ASCookie' in data:
get_cookie(data)
data = get_data(item)
return data
# NOTE(review): diff residue — the old imperative mainlist() (support.menu
# calls, first `return itemlist`) is fused with the new @support.menu
# declarative version (`anime = [...]` / `return locals()`), which is dead
# code as written. Reconcile against upstream before editing.
@support.menu
def mainlist(item):
log()
itemlist = []
support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow')
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico'])
support.menu(itemlist, 'Cerca', 'search', host)
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
anime = ['/animelist?load_all=1&d=1',
('ITA',['', 'submenu', '/filter?language%5B0%5D=1']),
('SUB-ITA',['', 'submenu', '/filter?language%5B0%5D=0']),
('Più Votati',['/toplist','menu', 'top']),
('In Corso',['/animeincorso','peliculas','incorso']),
('Ultimi Episodi',['/fetch_pages.php?request=episodes&d=1','peliculas','updated'])]
return locals()
# ----------------------------------------------------------------------------------------------------------------
# Normalise a scraped title: decode HTML entities, strip '[HD]', odd quote
# characters and a trailing "(YYYY)" year.
# NOTE(review): `.replace('', '\'')` replaces the EMPTY string — upstream's
# first argument was most likely a non-ASCII quote character lost in
# transit; as written it would interleave apostrophes between every
# character. Confirm against the upstream file.
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('', '\'').replace('×', 'x').replace('"', "'")
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
# Paginated full anime list (15 per page; page number rides after '{}').
# Titles are split into ITA vs Sub-ITA; two known movies get hard-coded
# years for TMDB matching.
def lista_anime(item):
log()
itemlist = []
PERPAGE = 15
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
if '||' in item.url:
series = item.url.split('\n\n')
matches = []
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Extract the contents
patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
matches = support.match(item, patron, headers=headers)[0]
scrapedplot = ""
scrapedthumbnail = ""
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
movie = False
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
infoLabels = {}
if 'Akira' in title:
movie = True
infoLabels['year'] = 1988
if 'Dragon Ball Super Movie' in title:
movie = True
infoLabels['year'] = 2019
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios" if movie == False else 'findvideos',
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=showtitle,
show=showtitle,
contentTitle=showtitle,
plot=scrapedplot,
contentType='episode' if movie == False else 'movie',
originalUrl=scrapedurl,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Pagination
if len(matches) >= p * PERPAGE:
support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
# Episode list fetched via the loading_anime XHR endpoint; single-episode
# "Movie" entries are redirected straight to findvideos.
# NOTE(review): the pattern r'\Episodi:...' starts with the escape `\E`,
# which modern `re` rejects as a bad escape — presumably a stray backslash;
# confirm against upstream.
def episodios(item):
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
# movie or series
movie = scrapertools.find_single_match(data, r'\Episodi:</b>\s(\d*)\sMovie')
data = httptools.downloadpage(
host + "/loading_anime?anime_id=" + anime_id,
headers={
'X-Requested-With': 'XMLHttpRequest'
}).data
patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
patron += r'<td style="[^"]+"><a href="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = cleantitle(scrapedtitle)
scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
scrapedtitle = '[B]' + scrapedtitle + '[/B]'
itemlist.append(
Item(
channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedtitle,
url=urlparse.urljoin(host, scrapedurl),
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=item.plot,
fanart=item.thumbnail,
thumbnail=item.thumbnail))
if ((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType != 'movie':
item.url = itemlist[0].url
item.contentType = 'movie'
return findvideos(item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist, item)
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Resolve the streaming/download page for an episode and list its servers."""
    log()
    # Movies carry the show URL; hop to the first (only) episode first.
    if item.contentType == 'movie':
        episodes = episodios(item)
        if len(episodes) > 0:
            item.url = episodes[0].url
    page = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    page = re.sub(r'\n|\t|\s+', ' ', page)
    stream_url = scrapertools.find_single_match(page, r'<a href="([^"]+)"><div class="downloadestreaming">')
    page = httptools.downloadpage(stream_url, headers=headers, ignore_response_code=True).data
    page = re.sub(r'\n|\t|\s+', ' ', page)
    return support.server(item, data=page)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
# Latest-episodes grid fetched via XHR POST (page number after '{}' in the
# URL); ITA vs Sub-ITA split mirrors lista_anime.
def ultimiep(item):
log()
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
post = "page=%s" % p if p > 1 else None
data = httptools.downloadpage(
item.url, post=post, headers={
'X-Requested-With': 'XMLHttpRequest'
}).data
patron = r"""<a href='[^']+'><div class="locandina"><img alt="[^"]+" src="([^"]+)" title="[^"]+" class="grandezza"></div></a>\s*"""
patron += r"""<a href='([^']+)'><div class="testo">(.+?)</div></a>\s*"""
patron += r"""<a href='[^']+'><div class="testo2">(.+?)</div></a>"""
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle1, scrapedtitle2 in matches:
scrapedtitle1 = cleantitle(scrapedtitle1)
scrapedtitle2 = cleantitle(scrapedtitle2)
scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''
title = scrapedtitle
showtitle = scrapedtitle
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
contentType="episode",
action="findvideos",
title=title,
url=scrapedurl,
fulltitle=scrapedtitle1,
show=showtitle,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pages
patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
next_page = scrapertools.find_single_match(data, patronvideos)
if next_page:
support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    """Feed the global 'news' menu for this channel.

    Only the 'anime' category is handled; any scraping failure is logged and
    an empty list returned so the aggregated news view keeps working.
    """
    log(categoria)
    itemlist = []
    item = Item()
    item.url = host
    item.extra = ''
    # NOTE(review): this nested 'search' is never called from here; it looks
    # like a leftover from a merge/refactor — confirm before removing.
    def search(item, texto):
        support.info(texto)
        item.url = host + '/animelist?search=' + texto
        item.contentType = 'tvshow'
    try:
        if categoria == "anime":
            item.url = "%s/fetch_pages?request=episodios" % host
            item.action = "ultimiep"
            itemlist = ultimiep(item)
            # Drop the trailing "next page" pseudo-entry appended by ultimiep.
            if itemlist[-1].action == "ultimiep":
                itemlist.pop()
            # NOTE(review): this discards the ultimiep() result built above and
            # re-scrapes via peliculas() — verify this is intentional.
            return peliculas(item)
    # Keep the news scan alive on error
    except:
        import sys
        for line in sys.exc_info():
            # NOTE(review): the same detail is logged twice, to both loggers.
            logger.error("{0}".format(line))
            support.logger.error("%s" % line)
        return []
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search_anime(item, texto):
    """Query the site's JSON search endpoint and build one Item per hit.

    Each suggested title is validated against the typeahead endpoint, which
    answers 'Anime non esistente' for unknown shows and (presumably — TODO
    confirm) the anime slug otherwise, which is then used to build the url.
    """
    log(texto)
    # NOTE(review): this nested 'newest' is defined but never invoked here —
    # it looks like a merge artifact; confirm before removing.
    def newest(categoria):
        support.info()
    itemlist = []
    data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
    jsondata = jsontools.load(data)
    for title in jsondata:
        # Per-title lookup: the response body becomes the show's url slug.
        data = str(httptools.downloadpage("%s/templates/header?check=1" % host, post="typeahead=%s" % title).data)
        if 'Anime non esistente' in data:
            continue
        else:
            # Normalize the dubbed marker, then tag subbed shows as Sub-ITA.
            title = title.replace('(ita)', '(ITA)')
            showtitle = title
            if '(ITA)' in title:
                title = title.replace('(ITA)', '').strip()
                showtitle = title
            else:
                title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
            url = "%s/anime/%s" % (host, data)
            itemlist.append(
                Item(
                    channel=item.channel,
                    contentType="episode",
                    action="episodios",
                    title=title,
                    url=url,
                    fulltitle=title,
                    show=showtitle,
                    thumbnail=""))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    """Global-search entry point: delegate to search_anime().

    On any scraping error the exception is logged and an empty list is
    returned so the global search keeps running.
    """
    log(texto)
    # A fresh Item is used so flags on the incoming one do not leak in.
    item = support.Item()
    try:
        # The original contained an unreachable branch after this return that
        # referenced an undefined 'categoria' variable; it has been removed.
        return search_anime(item, texto)
    # Keep the global search alive on error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def list_az(item):
    """Group the site's A-Z list into one folder Item per initial letter.

    Each letter Item carries its shows as 'url||title' pairs joined by blank
    lines, which the lista_anime action later splits back apart.
    """
    log()
    itemlist = []
    alphabet = {}
    # Entries: href plus the show title right after the closing tags.
    patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
    matches = support.match(item, patron, headers=headers)[0]
    for scrapedurl, scrapedtitle in matches:
        letter = scrapedtitle[0].upper()
        # setdefault replaces the explicit "key not present" check.
        alphabet.setdefault(letter, []).append(scrapedurl + '||' + scrapedtitle)
    for letter in sorted(alphabet):
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime",
                 url='\n\n'.join(alphabet[letter]),
                 title=letter,
                 fulltitle=letter))
    return itemlist
# ================================================================================================================
@support.scrape
def submenu(item):
    """Filter submenu; support.scrape consumes the locals() returned below."""
    data = support.match(item.url + item.args).data
    action = 'filter'
    # One entry per filter dropdown: its label, the <select> id (used later as
    # the query parameter name) and the raw <option> block.
    patronMenu = r'<h5 class="[^"]+">(?P<title>[^<]+)[^>]+>[^>]+>\s*<select id="(?P<parameter>[^"]+)"[^>]+>(?P<data>.*?)</select>'
    def itemlistHook(itemlist):
        # Prepend an unfiltered "Tutti" (all) entry and drop the last scraped one.
        itemlist.insert(0, item.clone(title=support.typo('Tutti','bold'), url=item.url + item.args, action='peliculas'))
        return itemlist[:-1]
    return locals()
def filter(item):
    """Build one 'peliculas' entry for every <option> of the chosen dropdown."""
    source = item.data if item.data else item.url
    options = support.match(source, patron=r'<option value="(?P<value>[^"]+)"[^>]*>(?P<title>[^<]+)').matches
    itemlist = [
        item.clone(title=support.typo(title, 'bold'),
                   url='{}{}&{}%5B0%5D={}'.format(host, item.args, item.parameter, value),
                   action='peliculas',
                   args='filter')
        for value, title in options
    ]
    support.thumb(itemlist, genre=True)
    return itemlist
@support.scrape
def menu(item):
    """'Top' lists menu; support.scrape consumes the locals() returned below."""
    # Each scraped entry keeps its raw HTML block in 'other', later consumed
    # by peliculas() when item.args == 'top'.
    patronMenu = r'<div class="col-md-13 bg-dark-as-box-shadow p-2 text-white text-center">(?P<title>[^"<]+)<(?P<other>.*?)(?:"lista-top"|"clearfix")'
    action = 'peliculas'
    item.args = 'top'
    def itemHook(item2):
        # Every entry points back to the page that was just scraped.
        item2.url = item.url
        return item2
    return locals()
@support.scrape
def peliculas(item):
    """Generic catalogue renderer; support.scrape consumes the locals() below.

    The html source and the scraping pattern depend on item.args:
      'top'     -> parse the pre-scraped block stashed in item.other by menu()
      'updated' -> latest-episodes feed (POST-paginated)
      'filter'  -> filtered catalogue (page count read from 'totalPages')
      'incorso' -> ongoing-series layout
      default   -> standard catalogue cards
    """
    anime = True
    deflang= 'Sub-ITA'
    action = 'check'
    page = None
    # POST body for server-side pagination; only needed from page 2 onwards.
    post = "page=" + str(item.page if item.page else 1) if item.page and int(item.page) > 1 else None
    data = get_data(item)
    # debug = True
    if item.args == 'top':
        # menu() already scraped the block and stored it in item.other.
        data = item.other
        patron = r'light">(?P<title2>[^<]+)</div>\s*(?P<title>[^<]+)[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)">(?:<a[^>]+>|\s*)<img.*?src="(?P<thumb>[^"]+)"'
    else:
        data = support.match(item, post=post, headers=headers).data
        if item.args == 'updated':
            page = support.match(data, patron=r'data-page="(\d+)" title="Next">').match
            patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?">\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s\s*(?P<type>[^\s]+)\s*(?P<episode>\d+)'
            typeContentDict = {'Movie':'movie', 'Episodio':'episode'} #item.contentType='episode'
            action = 'findvideos'
            def itemlistHook(itemlist):
                # Append a "next page" entry while the feed reports one.
                if page:
                    itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),page= page, thumbnail=support.thumb()))
                return itemlist
        elif 'filter' in item.args:
            page = support.match(data, patron=r'totalPages:\s*(\d+)').match
            patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?">\s*<img src="(?P<thumb>[^"]+)"'
            def itemlistHook(itemlist):
                # Filtered lists paginate via an explicit '&page=N' query parameter.
                if item.nextpage: item.nextpage += 1
                else: item.nextpage = 2
                if page and item.nextpage < int(page):
                    itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), url= '{}&page={}'.format(item.url, item.nextpage), infoLabels={}, thumbnail=support.thumb()))
                return itemlist
        else:
            # pagination = ''
            if item.args == 'incorso':
                patron = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-za-z-]+)\))?</a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]+)<'
            else:
                # debug=True
                patron = r'<img src="(?P<thumb>[^"]+)" alt="(?P<title>[^"\(]+)(?:\((?P<lang>[Ii][Tt][Aa])\))?(?:\s*\((?P<year>\d+)\))?[^"]*"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<a class="[^"]+" href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>\s*<p[^>]+>(?:(?P<plot>[^<]+))?<'
    return locals()
def check(item):
    """Route an anime card either to findvideos (movie) or episodios (series)."""
    # The detail page marks one-shot movies with an "Episodi: N Movie" row.
    is_movie = support.match(item, patron=r'Episodi:</b> (\d*) Movie').match
    if not is_movie:
        item.contentType = 'tvshow'
        return episodios(item)
    episodes = episodios(item)
    if episodes:
        movie_item = episodes[0].clone(contentType='movie',
                                       contentTitle=item.fulltitle,
                                       contentSerieName='')
        return findvideos(movie_item)
@support.scrape
def episodios(item):
    """Episode list; support.scrape consumes the locals() returned below."""
    # Movies are rendered without the anime episode-numbering helpers.
    if item.contentType != 'movie': anime = True
    patron = r'episodi-link-button">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*(?P<title>[^\d<]+(?P<episode>\d+))\s*</a>'
    return locals()
def findvideos(item):
    """Collect the direct player plus every mirror listed in the dropdown."""
    support.info()
    player_url = support.match(item, patron=r'<a href="([^"]+)">[^>]+>[^>]+>G').match
    dropdown_html = support.match(player_url, headers=headers).data
    mirror_pages = support.match(dropdown_html, patron=r'<a class="dropdown-item"\s*href="([^"]+)', headers=headers).matches
    # The internal player is offered twice: main stream and alternate stream.
    itemlist = [
        item.clone(action="play", title='Primario', url=player_url, server='directo'),
        item.clone(action="play", title='Secondario', url=player_url + '&s=alt', server='directo'),
    ]
    # Each dropdown entry is a page hiding the real hoster link behind a button.
    candidates = [support.match(page, patron=r'<a href="([^"]+)"[^>]+><button', headers=headers).match
                  for page in mirror_pages]
    links = [link for link in candidates if link]
    return support.server(item, data=links, itemlist=itemlist)
def play(item):
    """Resolve the direct stream url right before playback starts."""
    if item.server == 'directo':
        pattern = r'(?:source type="[^"]+"\s*src=|file:[^"]+)"([^"]+)'
        item.url = support.match(item.url, patron=pattern).match
    return [item]

View File

@@ -1,52 +0,0 @@
{
"id": "animespace",
"name": "AnimeSpace",
"active": true,
"adult": false,
"language": [],
"thumbnail": "",
"banner": "",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
]
},
{
"id": "checklinks_number",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeSpace -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger, config
from specials import autoplay
from specials import renumbertools
__channel__ = "animespace"
host = config.get_channel_url(__channel__)
checklinks = config.get_setting('checklinks', 'animespace')
checklinks_number = config.get_setting('checklinks_number', 'animespace')
IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']
def mainlist(item):
    """Top level menu of the AnimeSpace channel."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    # (title, action, thumb key or None, url) for every plain menu entry.
    entries = [
        ("Nuevos Episodios", "new_episodes", 'new_episodes', host),
        ("Ultimas", "list_all", 'last', host + '/emision'),
        ("Todas", "list_all", 'all', host + '/animes'),
        ("Anime", "list_all", 'anime', host + '/categoria/anime'),
        ("Películas", "list_all", 'movies', host + '/categoria/pelicula'),
        ("OVAs", "list_all", None, host + '/categoria/ova'),
        ("ONAs", "list_all", None, host + '/categoria/ona'),
        ("Especiales", "list_all", None, host + '/categoria/especial'),
    ]
    itemlist = []
    for title, action, thumb, url in entries:
        itemlist.append(Item(channel=item.channel, title=title,
                             action=action,
                             thumbnail=get_thumb(thumb, auto=True) if thumb else '',
                             url=url))
    # Search gets its own fanart, so it is appended separately.
    itemlist.append(Item(channel=item.channel, title="Buscar",
                         action="search",
                         url=host + '/search?q=',
                         thumbnail=get_thumb('search', auto=True),
                         fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                         ))
    autoplay.show_option(item.channel, itemlist)
    itemlist = renumbertools.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    """Download *url* and collapse newlines/tabs/nbsp/<br>/runs of spaces."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all(item):
    """List every show/movie card of the current catalogue page."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = ('<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
              '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)')
    for url, thumb, title, year, kind in re.compile(patron, re.DOTALL).findall(data):
        kind = kind.strip().lower()
        # Context menu: renumbering plus autoplay entries.
        context = renumbertools.context(item)
        context.extend(autoplay.context)
        new_item = Item(channel=item.channel,
                        action='episodios',
                        title=title,
                        url=url,
                        thumbnail=thumb,
                        language='VOSE',
                        infoLabels={'year': year})
        if kind != 'anime':
            new_item.contentTitle = title
        else:
            new_item.plot = kind
            new_item.contentSerieName = title
        new_item.context = context
        itemlist.append(new_item)
    # Pagination: the "active" pager entry is followed by the next-page link.
    next_page = scrapertools.find_single_match(
        data, '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')
    if next_page != "":
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=actual_page + next_page,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def search(item, texto):
    """Site search: reuse list_all() on the query url; never raises."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    try:
        if query != '':
            return list_all(item)
        return []
    except:
        # Search errors must not break the global search.
        import sys
        for detail in sys.exc_info():
            logger.error("%s" % detail)
        return []
def new_episodes(item):
    """Scrape the 'latest episodes' strip of the home page."""
    logger.info()
    full_data = get_source(item.url)
    # Only the <section class="caps"> strip holds the new-episode cards.
    section = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
    patron = ('<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
              '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>')
    itemlist = []
    for url, thumb, episode, show in re.compile(patron, re.DOTALL).findall(section):
        itemlist.append(Item(channel=item.channel,
                             title='%s - %s' % (show, episode),
                             url=url,
                             thumbnail=thumb,
                             action='findvideos',
                             language='VOSE'))
    return itemlist
def episodios(item):
    """Build the episode list of a series, renumbered for Trakt.

    Episode numbers are read from the '...capitulo-N' url slug and mapped to
    season/episode via renumbertools.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<a class="item" href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)')
        lang = 'VOSE'
        season, episode = renumbertools.numbered_for_tratk(
            item.channel, item.contentSerieName, 1, int(episode))
        title = "%sx%s - %s" % (season, str(episode).zfill(2), item.contentSerieName)
        # Copy per episode: the original shared one dict across every Item, so
        # all episodes ended up with the season/episode of the last iteration.
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = season
        infoLabels['episode'] = episode
        itemlist.append(Item(channel=item.channel, title=title,
                             contentSerieName=item.contentSerieName, url=scrapedurl,
                             action='findvideos', language=lang, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    itemlist = itemlist[::-1]  # site lists newest first; show oldest first
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
                 extra1='library'))
    return itemlist
def findvideos(item):
    """Resolve every player option of an episode page into playable links."""
    import urllib
    logger.info()
    itemlist = []
    data = get_source(item.url)
    for raw_url in re.compile('id="Opt\d+">.*?src=(.*?) frameborder', re.DOTALL).findall(data):
        server = ''
        raw_url = raw_url.replace('&quot;', '')
        embed = get_source(raw_url)
        if "/stream/" in raw_url:
            # Internal player: the file url sits in a <source> tag.
            video_url = scrapertools.find_single_match(embed, '<source src="([^"]+)"')
            server = "directo"
        else:
            # External hoster: the real link is url-encoded in the query string.
            video_url = scrapertools.find_single_match(raw_url, '.*?url=([^&]+)?')
            video_url = urllib.unquote(video_url)
        if video_url != '':
            itemlist.append(Item(channel=item.channel, title='%s', url=video_url, action='play',
                                 language=item.language, infoLabels=item.infoLabels, server=server))
    # '%s' placeholders in the titles are filled with the detected server name.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    if checklinks:
        itemlist = servertools.check_list_links(itemlist, checklinks_number)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
def newest(categoria):
    """Entry point for the global 'news' menu; only 'anime' is supported."""
    item = Item()
    if categoria != 'anime':
        return []
    item.url = host
    return new_episodes(item)

View File

@@ -1,37 +0,0 @@
{
"id": "animesubita",
"name": "AnimeSubIta",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "animesubita.png",
"bannermenu": "animesubita.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,343 +0,0 @@
# -*- coding: utf-8 -*-
# Ringraziamo Icarus crew
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per AnimeSubIta
# ------------------------------------------------------------
import re
import urllib
import urlparse
from core import httptools, scrapertools, tmdb, support
from core.item import Item
from platformcode import logger, config
__channel__ = "animesubita"
host = config.get_channel_url(__channel__)
PERPAGE = 20
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
    """Root menu of the AnimeSubIta channel."""
    logger.info()
    # All folder entries share the same thumbnail.
    folder_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    entries = [
        ("lista_anime_completa", support.color("Lista Anime", "azure"), "%s/lista-anime/" % host),
        ("ultimiep", support.color("Ultimi Episodi", "azure"), "%s/category/ultimi-episodi/" % host),
        ("lista_anime", support.color("Anime in corso", "azure"), "%s/category/anime-in-corso/" % host),
        ("categorie", support.color("Categorie", "azure"), "%s/generi/" % host),
    ]
    itemlist = [Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=folder_thumb)
                for action, title, url in entries]
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title=support.color("Cerca anime ...", "yellow"),
                         extra="anime",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    """Feed the global 'news' menu; never raises."""
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "anime":
            item.url = host
            item.action = "ultimiep"
            itemlist = ultimiep(item)
            # Drop the trailing "next page" entry, useless in the news feed.
            if itemlist[-1].action == "ultimiep":
                itemlist.pop()
        return itemlist
    # Keep the news scan alive on error
    except:
        import sys
        for detail in sys.exc_info():
            logger.error("{0}".format(detail))
        return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    """Search the site; errors are logged and hidden from the caller."""
    logger.info()
    item.url = "%s/?s=%s" % (host, texto)
    try:
        return lista_anime(item)
    # Keep the global search alive on error
    except:
        import sys
        for detail in sys.exc_info():
            logger.error("%s" % detail)
        return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    """List the genre index, stripping the redundant 'Anime' word from labels."""
    logger.info()
    html = httptools.downloadpage(item.url).data
    entries = re.compile(r'<li><a title="[^"]+" href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(html)
    return [Item(channel=item.channel,
                 action="lista_anime",
                 title=name.replace('Anime', '').strip(),
                 text_color="azure",
                 url=url,
                 thumbnail=item.thumbnail,
                 folder=True)
            for url, name in entries]
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def ultimiep(item):
    """Rewrite the 'latest episodes' entries so each points to its show page.

    For every entry this builds: a cleaned title, the show url (episode slug
    stripped, '-streaming' enforced) and an 'extra' regex that findvideos()
    later uses to locate this specific episode's row in the episode table.
    """
    logger.info("ultimiep")
    itemlist = lista_anime(item, False, False)
    for itm in itemlist:
        title = scrapertools.decodeHtmlentities(itm.title)
        # Title clean-up
        title = title.replace("Streaming", "").replace("&", "")
        title = title.replace("Download", "")
        title = title.replace("Sub Ita", "").strip()
        # Entry kind: 'Episodio'/'Episodi' or 'OAV'.
        eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
        # URL build: drop the per-episode slug to reach the show page.
        url = re.sub(r'%s-?\d*-' % eptype.lower(), '', itm.url)
        if "-streaming" not in url:
            url = url.replace("sub-ita", "sub-ita-streaming")
        epnumber = ""
        if 'episodio' in eptype.lower():
            epnumber = scrapertools.find_single_match(title.lower(), r'episodio?\s*(\d+)')
            # ':?' keeps the fragment valid as a regex piece ("Episodio:? 5").
            eptype += ":? " + epnumber
        # Regex handed to findvideos() to single out this episode's table row.
        extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
        itm.title = support.color(title, 'azure').strip()
        itm.action = "findvideos"
        itm.url = url
        itm.fulltitle = cleantitle
        itm.extra = extra
        itm.show = re.sub(r'Episodio\s*', '', title)
        itm.thumbnail = item.thumbnail
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_anime(item, nextpage=True, show_lang=True):
    """Scrape a paginated list of shows.

    nextpage:  append the "next page" entry when the site exposes one.
    show_lang: keep the '(Sub Ita)' tag visible in the rendered title.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="post-list group">(.*?)</nav><!--/.pagination-->')
    # patron = r'<a href="([^"]+)" title="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>' # pattern with thumbnail; Kodi does not fetch images from the site
    patron = r'<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
        # Title clean-up
        scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
        scrapedtitle = scrapedtitle.replace("Download", "")
        lang = scrapertools.find_single_match(scrapedtitle, r"([Ss][Uu][Bb]\s*[Ii][Tt][Aa])")
        scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
        eptype = scrapertools.find_single_match(scrapedtitle, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', scrapedtitle)
        cleantitle = cleantitle.replace(lang, "").strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
                 # NOTE(review): 'color' is used unqualified here while the rest
                 # of the file calls support.color — confirm it is in scope.
                 title=color(scrapedtitle.replace(lang, "(%s)" % support.color(lang, "red") if show_lang else "").strip(), 'azure'),
                 fulltitle=cleantitle,
                 url=scrapedurl,
                 show=cleantitle,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if nextpage:
        # Pagination: the site advertises the next page in a <link rel="next">.
        patronvideos = r'<link rel="next" href="([^"]+)"\s*/>'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)
        if len(matches) > 0:
            scrapedurl = matches[0]
            itemlist.append(
                Item(channel=item.channel,
                     action="lista_anime",
                     title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                     folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_anime_completa(item):
    """Full A-Z catalogue, paginated client-side PERPAGE entries at a time.

    The site serves the whole list in a single document, so the current page
    number rides on the url after a '{}' separator.
    """
    logger.info()
    itemlist = []
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<ul class="lcp_catlist"[^>]+>(.*?)</ul>')
    patron = r'<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Client-side slicing of the single huge list.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
        cleantitle = scrapedtitle.replace("Sub Ita Streaming", "").replace("Ita Streaming", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
                 title=support.color(scrapedtitle, 'azure'),
                 fulltitle=cleantitle,
                 show=cleantitle,
                 url=scrapedurl,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if len(matches) >= p * PERPAGE:
        # More entries remain: re-enter this action with page p+1.
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="lista_anime_completa",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodi(item):
    """List the episode rows of a show's download/streaming table.

    Rows whose icon is 'nodownload'/'nostreaming' are unavailable and get
    skipped; VVVVID links cannot be resolved and get a placeholder entry.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl, scrapedimg in matches:
        if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
            continue
        if 'vvvvid' in scrapedurl.lower():
            itemlist.append(Item(title='I Video VVVVID Non sono supportati'))
            continue
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Strip any markup left inside the cell before styling the title.
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=item.title,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))
    # Service entries (video library support)
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Resolve an episode page into a direct stream url.

    When item.extra is set (entries coming from ultimiep) it is a regex that
    locates this specific episode's row inside the show page's table. The
    stream is served by the site's own 'episodioN.php' player and requires
    both a Referer and the session cookies to be replayed by the player.
    """
    logger.info()
    itemlist = []
    headers = {'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
    if item.extra:
        data = httptools.downloadpage(item.url, headers=headers).data
        blocco = scrapertools.find_single_match(data, r'%s(.*?)</tr>' % item.extra)
        item.url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]+>')
    patron = r'http:\/\/link[^a]+animesubita[^o]+org\/[^\/]+\/.*?(episodio\d*)[^p]+php(\?.*)'
    for phpfile, scrapedurl in re.findall(patron, item.url, re.DOTALL):
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
        # Replay the Netscape-format cookies stored for this domain
        # (tab-separated fields: name is field 5, value is field 6).
        cookies = ""
        matches = re.compile('(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""), re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        scrapedurl = scrapertools.find_single_match(data, r'<source src="([^"]+)"[^>]+>')
        # Kodi convention: urlencoded headers after '|' are sent by the player.
        url = scrapedurl + '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 text_color="azure",
                 title="[%s] %s" % (support.color("Diretto", "orange"), item.title),
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))
    return itemlist

View File

@@ -1,36 +0,0 @@
{
"id": "animetubeita",
"name": "Animetubeita",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "http:\/\/i.imgur.com\/rQPx1iQ.png",
"bannermenu": "http:\/\/i.imgur.com\/rQPx1iQ.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,364 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per animetubeita
# ----------------------------------------------------------
import re
import urllib
from core import httptools, scrapertools, tmdb
from core.item import Item
from platformcode import logger, config
__channel__ = "animetubeita"
host = config.get_channel_url(__channel__)
hostlista = host + "/lista-anime/"
hostgeneri = host + "/generi/"
hostcorso = host + "/category/serie-in-corso/"
def mainlist(item):
    """Root menu of the Animetubeita channel.

    NOTE(review): AnimeThumbnail/AnimeFanart, CategoriaThumbnail/CategoriaFanart
    and CercaThumbnail/CercaFanart are not defined in this function; they are
    presumably module-level constants defined elsewhere in the file — confirm.
    """
    log("animetubeita", "mainlist", item.channel)
    itemlist = [Item(channel=item.channel,
                     action="lista_home",
                     title="[COLOR azure]Home[/COLOR]",
                     url=host,
                     thumbnail=AnimeThumbnail,
                     fanart=AnimeFanart),
                # Item(channel=item.channel,
                #      action="lista_anime",
                #      title="[COLOR azure]A-Z[/COLOR]",
                #      url=hostlista,
                #      thumbnail=AnimeThumbnail,
                #      fanart=AnimeFanart),
                Item(channel=item.channel,
                     action="lista_genere",
                     title="[COLOR azure]Genere[/COLOR]",
                     url=hostgeneri,
                     thumbnail=CategoriaThumbnail,
                     fanart=CategoriaFanart),
                Item(channel=item.channel,
                     action="lista_in_corso",
                     title="[COLOR azure]Serie in Corso[/COLOR]",
                     url=hostcorso,
                     thumbnail=CategoriaThumbnail,
                     fanart=CategoriaFanart),
                Item(channel=item.channel,
                     action="search",
                     title="[COLOR lime]Cerca...[/COLOR]",
                     url=host + "/?s=",
                     thumbnail=CercaThumbnail,
                     fanart=CercaFanart)]
    return itemlist
def lista_home(item):
    """List the shows featured on the home page, with pagination.

    NOTE(review): relies on the module-level helper scrapedAll() and on the
    AvantiTxt/AvantiImg pagination constants defined elsewhere in this file.
    """
    log("animetubeita", "lista_home", item.channel)
    itemlist = []
    patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in scrapedAll(item.url, patron):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Keep only the part before the "Sub ..." language suffix.
        title = title.split("Sub")[0]
        # Show name without the per-episode part, used for the video library.
        fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        itemlist.append(
            Item(channel=item.channel,
                 action="dl_s",
                 contentType="tvshow",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 fulltitle=fulltitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fanart=scrapedthumbnail,
                 show=fulltitle,
                 plot=scrapedplot))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    # ===========================================================
    data = httptools.downloadpage(item.url).data
    patron = '<link rel="next" href="(.*?)"'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_home",
                 title=AvantiTxt,
                 url=next_page,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================
    return itemlist
# def lista_anime(item):
# log("animetubeita", "lista_anime", item.channel)
# itemlist = []
# patron = '<li.*?class="page_.*?href="(.*?)">(.*?)</a></li>'
# for scrapedurl, scrapedtitle in scrapedAll(item.url, patron):
# title = scrapertools.decodeHtmlentities(scrapedtitle)
# title = title.split("Sub")[0]
# log("url:[" + scrapedurl + "] scrapedtitle:[" + title + "]")
# itemlist.append(
# Item(channel=item.channel,
# action="dettaglio",
# contentType="tvshow",
# title="[COLOR azure]" + title + "[/COLOR]",
# url=scrapedurl,
# show=title,
# thumbnail="",
# fanart=""))
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# return itemlist
def lista_genere(item):
    """List the genre links found in the category block of the page.

    Fix: the log() call used the tag "lista_anime_genere" while every
    sibling function in this channel logs under "animetubeita"; made
    consistent.
    """
    log("animetubeita", "lista_genere", item.channel)
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Restrict matching to the category block of the page body.
    bloque = scrapertools.find_single_match(data,
                                            '<div class="hentry page post-1 odd author-admin clear-block">(.*?)<div id="disqus_thread">')
    patron = '<li class="cat-item cat-item.*?"><a href="(.*?)" >(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_generi",
                 title='[COLOR lightsalmon][B]' + scrapedtitle + '[/B][/COLOR]',
                 url=scrapedurl,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 thumbnail=item.thumbnail))
    return itemlist
def lista_generi(item):
    """List shows of a single genre page.

    Same layout as lista_home() but items open "dettaglio" directly.
    Fix: the original downloaded item.url twice (once via scrapedAll,
    once for the pagination link); the page is now fetched once.
    """
    log("animetubeita", "lista_generi", item.channel)
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title = title.split("Sub")[0]
        # Strip the trailing "Episodio N" so TMDB can match the show name.
        fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        itemlist.append(
            Item(channel=item.channel,
                 action="dettaglio",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 contentType="tvshow",
                 fulltitle=fulltitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 show=fulltitle,
                 fanart=scrapedthumbnail,
                 plot=scrapedplot))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginazione
    # ===========================================================
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_generi",
                 title=AvantiTxt,
                 url=next_page,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================
    return itemlist
def lista_in_corso(item):
    """List the currently-airing shows.

    Fixes: the log() call said "lista_home" (copy-paste leftover), and
    the page was downloaded twice (once via scrapedAll, once for the
    pagination link) — it is now fetched once and reused.
    """
    log("animetubeita", "lista_in_corso", item.channel)
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title="Link.*?>(.*?)</a></h2>.*?<img.*?src="(.*?)".*?<td><strong>Trama</strong></td>.*?<td>(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title = title.split("Sub")[0]
        # Strip the trailing "Episodio N" so TMDB can match the show name.
        fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        itemlist.append(
            Item(channel=item.channel,
                 action="dettaglio",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 contentType="tvshow",
                 fulltitle=fulltitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 show=fulltitle,
                 fanart=scrapedthumbnail,
                 plot=scrapedplot))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginazione
    # ===========================================================
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_in_corso",
                 title=AvantiTxt,
                 url=next_page,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================
    return itemlist
def dl_s(item):
    """List the "DOWNLOAD & STREAMING" link(s) of a show page.

    Fix: the original body was two copy-pasted loops differing only in
    the regex; they are folded into one loop over both patterns, keeping
    the same URL de-duplication across patterns.
    """
    log("animetubeita", "dl_s", item.channel)
    itemlist = []
    encontrados = set()
    # Two layout variants of the same download/streaming anchor.
    patrones = ['<p><center><a.*?href="(.*?)"',
                '<p><center>.*?<a.*?href="(.*?)"']
    for patron in patrones:
        for scrapedurl in scrapedAll(item.url, patron):
            if scrapedurl in encontrados:
                continue
            encontrados.add(scrapedurl)
            title = "DOWNLOAD & STREAMING"
            itemlist.append(Item(channel=item.channel,
                                 action="dettaglio",
                                 title="[COLOR azure]" + title + "[/COLOR]",
                                 url=scrapedurl,
                                 thumbnail=item.thumbnail,
                                 fanart=item.thumbnail,
                                 plot=item.plot,
                                 folder=True))
    return itemlist
def dettaglio(item):
    """List the playable episodes of an anime page.

    For every stream/strm .php link found in the page, downloads the
    player page, rebuilds the site cookies into a Cookie header and
    produces a direct mp4 URL with the headers appended in Kodi's
    ``url|urlencoded-headers`` form.
    """
    log("animetubeita", "dettaglio", item.channel)
    itemlist = []
    headers = {'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
    # Episodes are numbered by position; the page itself gives no number.
    episodio = 1
    patron = r'<a href="http:\/\/link[^a]+animetubeita[^c]+com\/[^\/]+\/[^s]+((?:stream|strm))[^p]+php(\?.*?)"'
    for phpfile, scrapedurl in scrapedAll(item.url, patron):
        title = "Episodio " + str(episodio)
        episodio += 1
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
        # Rebuild the Cookie header from the stored cookie jar: every
        # Netscape-format line for .animetubeita.com contributes one
        # "name=value" pair (fields 5 and 6 of the tab-separated line).
        cookies = ""
        matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]  # drop the trailing ';'
        # ------------------------------------------------
        url = scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
        # NOTE(review): urllib.urlencode is Python 2 only (Python 3 moved it
        # to urllib.parse.urlencode) — confirm the runtime this addon targets.
        url += '|' + urllib.urlencode(headers)
        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title="[COLOR azure]" + title + "[/COLOR]",
                             url=url,
                             thumbnail=item.thumbnail,
                             fanart=item.thumbnail,
                             plot=item.plot))
    return itemlist
def search(item, texto):
    """Search entry point: append the query to item.url and reuse
    lista_home(); returns [] on failure so global search keeps going.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    log("animetubeita", "search", item.channel)
    item.url = item.url + texto
    try:
        return lista_home(item)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def scrapedAll(url="", patron=""):
    """Download *url* and return every DOTALL regex match of *patron*."""
    page = httptools.downloadpage(url).data
    found = re.findall(patron, page, re.DOTALL)
    scrapertools.printMatches(found)
    return found
def scrapedSingle(url="", single="", patron=""):
    """Download *url*, cut out the first *single* match, then return all
    DOTALL matches of *patron* inside that fragment."""
    page = httptools.downloadpage(url).data
    fragment = scrapertools.find_single_match(page, single)
    found = re.findall(patron, fragment, re.DOTALL)
    scrapertools.printMatches(found)
    return found
def log(funzione="", stringa="", canale=""):
    """Emit a debug line tagged with channel and function name."""
    logger.debug("[{0}].[{1}] {2}".format(canale, funzione, stringa))
# Static artwork used by the channel menus.
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
# Localized "next page" label (string 30992) and pager icon.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"

20
channels/animeunity.json Normal file
View File

@@ -0,0 +1,20 @@
{
"id": "animeunity",
"name": "AnimeUnity",
"active": true,
"language": ["ita", "sub-ita"],
"thumbnail": "animeunity.png",
"banner": "animeunity.png",
"categories": ["anime"],
"settings": [
{
"id": "order",
"type": "list",
"label": "Ordine di Visualizzazione",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
}
]
}

258
channels/animeunity.py Normal file
View File

@@ -0,0 +1,258 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per AnimeUnity
# ------------------------------------------------------------
import cloudscraper, json, copy, inspect
from core import jsontools, support, httptools
from platformcode import autorenumber
# support.dbg()
host = support.config.get_channel_url()
# NOTE(review): this request runs at import time; a network failure here
# prevents the whole channel module from loading — consider lazy init.
response = httptools.downloadpage(host + '/archivio')
# CSRF token + session cookies required by the site's JSON API endpoints.
csrf_token = support.match(response.data, patron='name="csrf-token" content="([^"]+)"').match
headers = {'content-type': 'application/json;charset=UTF-8',
           'x-csrf-token': csrf_token,
           'Cookie' : '; '.join([x.name + '=' + x.value for x in response.cookies])}
@support.menu
def mainlist(item):
    """Channel main menu.

    The @support.menu decorator builds the menu entries from the
    locals() returned below (``top``, ``menu`` and ``search`` keys).
    """
    top = [('Ultimi Episodi', ['', 'news'])]
    menu = [('Anime {bullet bold}',['', 'menu', {}, 'tvshow']),
            ('Film {submenu}',['', 'menu', {'type': 'Movie'}]),
            ('TV {submenu}',['', 'menu', {'type': 'TV'}, 'tvshow']),
            ('OVA {submenu} {tv}',['', 'menu', {'type': 'OVA'}, 'tvshow']),
            ('ONA {submenu} {tv}',['', 'menu', {'type': 'ONA'}, 'tvshow']),
            ('Special {submenu} {tv}',['', 'menu', {'type': 'Special'}, 'tvshow'])]
    search =''
    return locals()
def menu(item):
    """Second-level menu: All / ITA / Genre / Year (plus status filters
    for TV shows) and a search entry."""
    item.action = 'peliculas'

    def args_with(**extra):
        # Shallow-copy the current API args and overlay the given keys.
        overlaid = copy.copy(item.args)
        overlaid.update(extra)
        return overlaid

    entries = [item.clone(title=support.typo('Tutti', 'bold')),
               item.clone(title=support.typo('ITA', 'bold'), args=args_with(title='(ita)')),
               item.clone(title=support.typo('Genere', 'bold'), action='genres'),
               item.clone(title=support.typo('Anno', 'bold'), action='years')]
    if item.contentType == 'tvshow':
        entries.append(item.clone(title=support.typo('In Corso', 'bold'),
                                  args=args_with(status='In Corso')))
        entries.append(item.clone(title=support.typo('Terminato', 'bold'),
                                  args=args_with(status='Terminato')))
    entries.append(item.clone(title=support.typo('Cerca...', 'bold'),
                              action='search', thumbnail=support.thumb('search')))
    return entries
def genres(item):
    """List genre filters parsed from the archive page's ``genres=``
    HTML attribute (JSON with escaped quotes)."""
    support.info()
    # support.dbg()
    itemlist = []
    genres = json.loads(support.match(response.text, patron='genres="([^"]+)').match.replace('&quot;','"'))
    for genre in genres:
        # NOTE(review): item.args is mutated in place on every iteration;
        # if Item.clone copies args shallowly all entries may end up
        # sharing the last genre — confirm clone() semantics.
        item.args['genres'] = [genre]
        itemlist.append(item.clone(title=support.typo(genre['name'],'bold'), action='peliculas'))
    return support.thumb(itemlist)
def years(item):
    """List year filters, newest first, from the site's oldest year
    (``anime_oldest_date`` attribute) up to the current year."""
    support.info()
    itemlist = []
    from datetime import datetime
    current_year = datetime.today().year
    oldest_year = int(support.match(response.text, patron='anime_oldest_date="([^"]+)').match)
    for year in list(reversed(range(oldest_year, current_year + 1))):
        # NOTE(review): item.args is mutated in place each iteration; if
        # clone() copies args shallowly, every entry may share the last
        # year — confirm Item.clone semantics.
        item.args['year']=year
        itemlist.append(item.clone(title=support.typo(year,'bold'), action='peliculas'))
    return itemlist
def search(item, text):
    """Search entry point: store the query in the API args and delegate
    to peliculas(); returns [] on failure so global search keeps going.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    support.info('search', item)
    if not item.args:
        item.args = {'title':text}
    else:
        item.args['title'] = text
    item.search = text
    try:
        return peliculas(item)
    # Continua la ricerca in caso di errore
    except Exception:
        import sys
        for line in sys.exc_info():
            support.info('search log:', line)
        return []
def newest(categoria):
    """'Novità' hook: return the latest episodes via news(), dropping the
    trailing pagination entry.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    support.info(categoria)
    itemlist = []
    item = support.Item()
    item.url = host
    try:
        itemlist = news(item)
        # Drop the trailing "next page" item, if present.
        if itemlist[-1].action == 'news':
            itemlist.pop()
    # Continua la ricerca in caso di errore
    except Exception:
        import sys
        for line in sys.exc_info():
            support.info(line)
        return []
    return itemlist
def news(item):
    """'Ultimi Episodi' list: parse the ``items-json`` attribute embedded
    in the home page markup.

    Fix: removed the redundant function-level ``import cloudscraper``
    (the module is already imported at the top of the file).
    """
    support.info()
    item.contentType = 'episode'
    itemlist = []
    # cloudscraper session handles the site's Cloudflare challenge.
    session = cloudscraper.create_scraper()
    fullJs = json.loads(support.match(session.get(item.url).text, headers=headers, patron=r'items-json="([^"]+)"').match.replace('&quot;','"'))
    js = fullJs['data']
    for it in js:
        # Skip records that carry no anime title.
        if it.get('anime', {}).get('title'):
            itemlist.append(
                item.clone(title= support.typo(it['anime']['title'] + ' - EP. ' + it['number'], 'bold'),
                           fulltitle=it['anime']['title'],
                           thumbnail=it['anime']['imageurl'],
                           forcethumb = True,
                           scws_id=it.get('scws_id', ''),
                           # video_url=it.get('link', ''),
                           plot=it['anime']['plot'],
                           action='findvideos')
            )
    # The API exposes the next page URL directly.
    if 'next_page_url' in fullJs:
        itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),thumbnail=support.thumb(), url=fullJs['next_page_url']))
    return itemlist
def peliculas(item):
    """Main listing via the JSON API (POST /archivio/get-animes).

    Pagination is offset-based, 30 records per page; the optional
    'order' channel setting is mapped onto the API order parameter.
    Single-episode records are presented as movies, the rest as shows.
    """
    support.info()
    itemlist = []
    page = item.page if item.page else 0
    item.args['offset'] = page * 30
    order = support.config.get_setting('order', item.channel)
    if order:
        # Index into the same value list declared in the channel JSON.
        order_list = [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
        item.args['order'] = order_list[order]
    payload = json.dumps(item.args)
    records = httptools.downloadpage(host + '/archivio/get-animes', headers=headers, post=payload).json['records']
    # support.dbg()
    for it in records:
        if not it['title']:
            it['title'] = ''
        # Language tag "(ITA)" embedded in the title, any letter case.
        lang = support.match(it['title'], patron=r'\(([It][Tt][Aa])\)').match
        title = support.re.sub(r'\s*\([^\)]+\)', '', it['title'])
        if 'ita' in lang.lower(): language = 'ITA'
        else: language = 'Sub-ITA'
        if title:
            itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else ''))
        else:
            # No local title: fall back to the English one.
            itm = item.clone(title=support.typo(it['title_eng'],'bold') + support.typo(language,'_ [] color kod'))
        itm.contentLanguage = language
        itm.type = it['type']
        itm.thumbnail = it['imageurl']
        itm.plot = it['plot']
        itm.url = '{}/anime/{}-{}'.format(item.url, it.get('id'), it.get('slug'))
        # A single-episode record is treated as a movie; otherwise a show.
        if it['episodes_count'] == 1:
            itm.contentType = 'movie'
            itm.fulltitle = itm.show = itm.contentTitle = title
            itm.contentSerieName = ''
            itm.action = 'findvideos'
            itm.scws_id = it['episodes'][0].get('scws_id', '')
            # itm.video_url = it['episodes'][0].get('link', '')
        else:
            itm.contentType = 'tvshow'
            itm.contentTitle = ''
            itm.fulltitle = itm.show = itm.contentSerieName = title
            itm.action = 'episodios'
            itm.episodes = it['episodes'] if 'episodes' in it else it.get('scws_id', '')
        itemlist.append(itm)
    autorenumber.start(itemlist)
    # A full page implies there may be more records: add a "next" item.
    if len(itemlist) >= 30:
        itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), thumbnail=support.thumb(), page=page + 1))
    return itemlist
def episodios(item):
    """Build the episode list from item.episodes (set by peliculas())."""
    support.info()
    itemlist = []
    # Multi-part movies are labelled "Parte", series "Episodio".
    title = 'Parte' if item.type.lower() == 'movie' else 'Episodio'
    for it in item.episodes:
        itemlist.append(
            item.clone(title=support.typo('{}. {} {}'.format(it['number'], title, it['number']), 'bold'),
                       episode = it['number'],
                       fulltitle=item.title,
                       show=item.title,
                       contentTitle='',
                       contentSerieName=item.contentSerieName,
                       thumbnail=item.thumbnail,
                       plot=item.plot,
                       action='findvideos',
                       contentType='episode',
                       url = '{}/{}'.format(item.url, it['id'])
                       )
            # video_url=it.get('link', ''))
        )
    # Skip renumbering/videolibrary entries when invoked by the
    # videolibrary scanner (caller named 'find_episodes').
    if inspect.stack(0)[1][3] not in ['find_episodes']:
        autorenumber.start(itemlist, item)
        support.videolibrary(itemlist, item)
        support.download(itemlist, item)
    return itemlist
def findvideos(item):
    """Resolve the stream: delegate the page URL to the
    'streamingcommunityws' server connector via support.server().

    The commented block below is a previous direct-token implementation
    kept for reference.
    """
    # if item.scws_id:
    #     from time import time
    #     from base64 import b64encode
    #     from hashlib import md5
    #
    #     client_ip = support.httptools.downloadpage('http://ip-api.com/json/').json.get('query')
    #
    #     expires = int(time() + 172800)
    #     token = b64encode(md5('{}{} Yc8U6r8KjAKAepEA'.format(expires, client_ip).encode('utf-8')).digest()).decode('utf-8').replace('=', '').replace('+', '-').replace('/', '_')
    #
    #     url = 'https://scws.work/master/{}?token={}&expires={}&n=1'.format(item.scws_id, token, expires)
    #
    #     itemlist = [item.clone(title=support.config.get_localized_string(30137), url=url, server='directo', action='play')]
    from core import channeltools
    # The item title shown is the channel's display name.
    itemlist = [item.clone(title=channeltools.get_channel_parameters(item.channel)['title'],
                           url=item.url, server='streamingcommunityws')]
    return support.server(item, itemlist=itemlist, referer=False)
    # return support.server(item, itemlist=itemlist)
#
# def play(item):
# urls = list()
# info = support.match(item.url, patron=r'(http.*?rendition=(\d+)[^\s]+)').matches
#
# if info:
# for url, res in info:
# urls.append(['hls [{}]'.format(res), url])
# return urls

View File

@@ -0,0 +1,21 @@
{
"id": "animeuniverse",
"name": "AnimeHDitalia",
"active": false,
"language": ["ita", "sub-ita"],
"thumbnail": "animeuniverse.png",
"banner": "animeuniverse.png",
"categories": ["anime", "sub-ita"],
"default_off": ["include_in_newest"],
"settings": [
{
"id": "perpage",
"type": "list",
"label": "Elementi per pagina",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": ["20","30","40","50","60","70","80","90","100"]
}
]
}

129
channels/animeuniverse.py Normal file
View File

@@ -0,0 +1,129 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per animeuniverse
# ----------------------------------------------------------
from core import support

# Channel base URL and request defaults.
host = support.config.get_channel_url()
headers = {}
# Page size: the channel setting is an index into this value list.
perpage_list = ['20','30','40','50','60','70','80','90','100']
perpage = perpage_list[support.config.get_setting('perpage' , 'animeuniverse')]
# Row pattern shared by episodios() and findvideos(): episode title plus
# its streaming link.
epPatron = r'<td>\s*(?P<title>[^<]+)[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img [^>]+Streaming'
@support.menu
def mainlist(item):
    """Channel main menu; the @support.menu decorator builds the entries
    from the locals() returned below."""
    anime=['/anime/',
           ('Tipo',['', 'menu', 'Anime']),
           ('Anno',['', 'menu', 'Anno']),
           ('Genere', ['', 'menu','Genere']),
           ('Ultimi Episodi',['/2/', 'peliculas', 'last']),
           ('Hentai', ['/hentai/', 'peliculas'])]
    return locals()
@support.scrape
def menu(item):
    """Scrape the sub-menu links of the nav section named by item.args
    (Tipo/Anno/Genere); the @support.scrape decorator consumes the
    returned locals()."""
    action = 'peliculas'
    patronBlock = item.args + r'</a>\s*<ul class="sub-menu">(?P<block>.*?)</ul>'
    patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
    return locals()
def search(item, texto):
    """Search entry point: store the query and delegate to peliculas();
    returns [] on failure so global search keeps going.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    support.info(texto)
    item.search = texto
    try:
        return peliculas(item)
    # Continua la ricerca in caso di errore
    except Exception:
        import sys
        for line in sys.exc_info():
            support.logger.error("%s" % line)
        return []
def newest(categoria):
    """'Novità' hook for the anime category: delegate to peliculas() with
    the "last" args.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    support.info(categoria)
    item = support.Item()
    try:
        if categoria == "anime":
            item.url = host
            item.args = "last"
            return peliculas(item)
    # Continua la ricerca in caso di errore
    except Exception:
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
        return []
@support.scrape
def peliculas(item):
    """Main listing via the theme's AJAX endpoint.

    Builds the POST query depending on context — movie section ('/mos/'),
    the "last episodes" feed, a search, or a category page. The
    @support.scrape decorator consumes the returned locals().
    """
    query = ''
    if '/mos/' in item.url:
        item.contentType = 'movie'
        action='findvideos'
    elif item.args == 'last':
        # Pre-encoded query fragment used by the "last episodes" feed.
        query='cat%5D=1&currentquery%5Bcategory__not_in%5D%5B'
        searchtext=''
        item.contentType = 'episode'
        action='findvideos'
    else:
        item.contentType = 'tvshow'
        action='episodios'
    if item.search:
        query = 's'
        searchtext = item.search
    if not query:
        # Default: filter by the category slug taken from the URL path.
        query='category_name'
        searchtext = item.url.split('/')[-2] if item.url != host else ''
    if not item.pag: item.pag = 1
    anime=True
    # blacklist=['Altri Hentai']
    data = support.match(host + '/wp-content/themes/animeuniverse/functions/ajax.php', post='sorter=recent&location=&loop=main+loop&action=sort&numarticles='+perpage+'&paginated='+str(item.pag)+'&currentquery%5B'+query+'%5D='+searchtext+'&thumbnail=1').data.replace('\\','')
    patron=r'<a href="(?P<url>[^"]+)"><img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)" class="[^"]+" alt="" title="(?P<title>.*?)\s*(?P<lang>Sub ITA|ITA)?(?:"| \[)'
    def itemlistHook(itemlist):
        # A full page means there is probably another one: add "next".
        if len(itemlist) == int(perpage):
            item.pag += 1
            itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), action='peliculas'))
        return itemlist
    return locals()
@support.scrape
def episodios(item):
    """Episode list: scrape show-page rows with the shared epPatron; the
    @support.scrape decorator consumes the returned locals()."""
    anime = True
    pagination = int(perpage)
    patron = epPatron
    # debug = True
    return locals()
def findvideos(item):
    """Resolve playable links: one per episode row for movies, otherwise
    a single 'watch' entry for the episode page itself."""
    found = []
    if item.contentType != 'movie':
        get_video_list(item.url, support.config.get_localized_string(30137), found)
    else:
        for ep_title, ep_url in support.match(item, patron=epPatron).matches:
            get_video_list(ep_url, ep_title, found)
    return support.server(item, itemlist=found)
def get_video_list(url, title, itemlist):
    """Follow *url*, extract the ``file=`` target from the redirected URL
    and append a direct-play item to *itemlist* (mutated in place and
    also returned)."""
    from requests import get
    if not url.startswith('http'):
        url = host + url
    redirected = get(url).url
    video = support.match(redirected, string=True, patron=r'file=([^$]+)').match
    if 'http' not in video:
        video = 'http://' + video
    itemlist.append(support.Item(title=title, url=video, server='directo', action='play'))
    return itemlist

View File

@@ -1,70 +1,29 @@
{
"id": "animeworld",
"name": "AnimeWorld",
"active": true,
"adult": false,
"language": ["ita"],
"id": "animeworld",
"name": "AnimeWorld",
"active": true,
"language": ["ita", "sub-ita"],
"thumbnail": "animeworld.png",
"banner": "animeworld.png",
"categories": ["anime"],
"categories": ["anime", "vos"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://www.animeworld.it",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"id": "lang",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"label": "Lingua di Ricerca",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["No filtrar","Italiano"]
}
"lvalues": [ "Tutte", "Ita", "Sub-Ita"]
},
{
"id": "order",
"type": "list",
"label": "Ordine di Visualizzazione",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Standard", "Ultime Aggiunte", "Lista A-Z", "Lista A-Z", "Più Vecchi", "Più Recenti", "Più Visti" ]
}
]
}

View File

@@ -1,315 +1,196 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per animeworld
# thanks to fatshotty
# ----------------------------------------------------------
import re
import time
import urllib
import urlparse
from core import httptools, scrapertoolsV2, servertools, tmdb, support, jsontools
from core.support import log
from core.item import Item
from platformcode import logger, config
from specials import autoplay, autorenumber
from core import httptools, support, config, jsontools
__channel__ = "animeworld"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'Italiano'}
list_language = IDIOMAS.values()
list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
list_quality = ['default', '480p', '720p', '1080p']
host = support.config.get_channel_url()
__channel__ = 'animeworld'
cookie = support.config.get_setting('cookie', __channel__)
headers = [['Cookie', cookie]]
def get_cookie(data):
global cookie, headers
cookie = support.match(data, patron=r'document.cookie="([^\s]+)').match
support.config.set_setting('cookie', cookie, __channel__)
headers = [['Cookie', cookie]]
def get_data(item):
# support.dbg()
url = httptools.downloadpage(item.url, headers=headers, follow_redirects=True, only_headers=True).url
data = support.match(url, headers=headers, follow_redirects=True).data
if 'SecurityAW' in data:
get_cookie(data)
data = get_data(item)
return data
def order():
# Seleziona l'ordinamento dei risultati
return str(support.config.get_setting("order", __channel__))
@support.menu
def mainlist(item):
log()
itemlist =[]
support.menu(itemlist, 'ITA submenu bold', 'build_menu', host + '/filter?', args=["anime", 'language[]=1'])
support.menu(itemlist, 'Sub-ITA submenu bold', 'build_menu', host + '/filter?', args=["anime", 'language[]=0'])
support.menu(itemlist, 'Archivio A-Z submenu', 'alfabetico', host+'/az-list', args=["tvshow","a-z"])
support.menu(itemlist, 'In corso submenu', 'video', host+'/', args=["in sala"])
support.menu(itemlist, 'Generi submenu', 'generi', host+'/')
support.menu(itemlist, 'Ultimi Aggiunti bold', 'video', host+'/newest', args=["anime"])
support.menu(itemlist, 'Ultimi Episodi bold', 'video', host+'/updated', args=["novita'"])
support.menu(itemlist, 'Cerca...', 'search')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
# Crea menu dei generi =================================================
def generi(item):
log()
patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video')
anime=['/filter?sort=',
('ITA',['/filter?dub=1&sort=', 'menu', 'dub=1']),
('SUB-ITA',['/filter?dub=0&sort=', 'menu', 'dub=0']),
('In Corso', ['/ongoing', 'peliculas','noorder']),
('Ultimi Episodi', ['/updated', 'peliculas', 'updated']),
('Nuove Aggiunte',['/newest', 'peliculas','noorder' ]),
('Generi',['/?d=1','genres',])]
return locals()
# Crea Menu Filtro ======================================================
@support.scrape
def genres(item):
action = 'peliculas'
data = get_data(item)
def build_menu(item):
log()
itemlist = []
support.menu(itemlist, 'Tutti bold submenu', 'video', item.url+item.args[1])
matches, data = support.match(item,r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>',r'<form class="filters.*?>(.*?)<\/form>')
log('ANIME DATA =' ,data)
for title, html in matches:
if title not in 'Lingua Ordine':
support.menu(itemlist, title + ' submenu bold', 'build_sub_menu', html, args=item.args)
log('ARGS= ', item.args[0])
log('ARGS= ', html)
return itemlist
patronBlock = r'dropdown[^>]*>\s*Generi\s*<span.[^>]+>(?P<block>.*?)</ul>'
patronMenu = r'<input.*?name="(?P<name>[^"]+)" value="(?P<value>[^"]+)"\s*>[^>]+>(?P<title>[^<]+)</label>'
# Crea SottoMenu Filtro ======================================================
def itemHook(item):
item.url = host + '/filter?' + item.name + '=' + item.value + '&sort='
return item
return locals()
def build_sub_menu(item):
log()
itemlist = []
matches = re.compile(r'<input.*?name="([^"]+)" value="([^"]+)"\s*>[^>]+>([^<]+)<\/label>', re.DOTALL).findall(item.url)
for name, value, title in matches:
support.menu(itemlist, support.typo(title, 'bold'), 'video', host + '/filter?' + '&' + name + '=' + value + '&' + item.args[1])
return itemlist
# Novità ======================================================
@support.scrape
def menu(item):
action = 'submenu'
data = get_data(item)
patronMenu=r'<button[^>]+>\s*(?P<title>[A-Za-z0-9]+)\s*<span.[^>]+>(?P<other>.*?)</ul>'
def itemlistHook(itemlist):
itemlist.insert(0, item.clone(title=support.typo('Tutti','bold'), action='peliculas'))
itemlist.append(item.clone(title=support.typo('Cerca...','bold'), action='search', search=True, thumbnail=support.thumb('search.png')))
return itemlist
return locals()
@support.scrape
def submenu(item):
action = 'peliculas'
data = item.other
# debug=True
patronMenu = r'<input.*?name="(?P<name>[^"]+)" value="(?P<value>[^"]+)"\s*>[^>]+>(?P<title>[^<]+)<\/label>'
def itemHook(item):
item.url = '{}/filter?{}={}&{}{}'.format(host, item.name, item.value, item.args, ('&sort=' if item.name != 'sort' else ''))
return item
return locals()
def newest(categoria):
log()
itemlist = []
item = Item()
support.info(categoria)
item = support.Item()
lang = config.get_setting('lang', channel=item.channel)
try:
if categoria == "anime":
item.url = host + '/newest'
item.action = "video"
itemlist = video(item)
if itemlist[-1].action == "video":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# Cerca ===========================================================
def search(item, texto):
log(texto)
item.url = host + '/search?keyword=' + texto
try:
return video(item)
item.url = host
item.args = "updated"
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
support.logger.error("{0}".format(line))
return []
# Lista A-Z ====================================================
def alfabetico(item):
return support.scrape(item, '<a href="([^"]+)" title="([^"]+)">', ['url', 'title'], patron_block=r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>', action='lista_anime')
def lista_anime(item):
log()
itemlist = []
matches ,data = support.match(item, r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>')
for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:
if scrapedoriginal == scrapedtitle:
scrapedoriginal=''
else:
scrapedoriginal = support.typo(scrapedoriginal,' -- []')
year = ''
lang = ''
infoLabels = {}
if '(' in scrapedtitle:
year = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([0-9]+\))')
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([a-zA-Z]+\))')
infoLabels['year'] = year
title = scrapedtitle.replace(year,'').replace(lang,'').strip()
original = scrapedoriginal.replace(year,'').replace(lang,'').strip()
if lang: lang = support.typo(lang,'_ color kod')
longtitle = '[B]' + title + '[/B]' + lang + original
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
contentType="episode",
action="episodios",
title=longtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
fulltitle=title,
show=title,
infoLabels=infoLabels,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Next page
support.nextPage(itemlist, item, data, r'<a class="page-link" href="([^"]+)" rel="next"')
return itemlist
def search(item, text):
support.info(text)
if item.search:
item.url = '{}/filter?{}&keyword={}&sort='.format(host, item.args, text)
else:
lang = ['?', '?dub=1&', '?dub=0&'][config.get_setting('lang', channel=item.channel)]
item.url = '{}/filter{}&keyword={}&sort='.format(host, lang, text)
item.contentType = 'tvshow'
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
def video(item):
log()
itemlist = []
@support.scrape
def peliculas(item):
data = get_data(item)
anime = True
if item.args not in ['noorder', 'updated'] and not item.url[-1].isdigit(): item.url += order() # usa l'ordinamento di configura canale
data = get_data(item)
matches, data = support.match(item, r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>', headers=headers)
if item.args == 'updated':
item.contentType='episode'
patron=r'<div class="inner">\s*<a href="(?P<url>[^"]+)" class[^>]+>\s*<img.*?src="(?P<thumb>[^"]+)" alt?="(?P<title>[^\("]+)(?:\((?P<lang>[^\)]+)\))?"[^>]+>[^>]+>\s*(?:<div class="[^"]+">(?P<type>[^<]+)</div>)?(?:[^>]+>){2,4}\s*<div class="ep">[^\d]+(?P<episode>\d+)[^<]*</div>'
action='findvideos'
else:
patron= r'<div class="inner">\s*<a href="(?P<url>[^"]+)" class[^>]+>\s*<img.*?src="(?P<thumb>[^"]+)" alt?="(?P<title>[^\("]+)(?:\((?P<year>\d+)\) )?(?:\((?P<lang>[^\)]+)\))?(?P<title2>[^"]+)?[^>]+>[^>]+>(?:\s*<div class="(?P<l>[^"]+)">[^>]+>)?\s*(?:<div class="[^"]+">(?P<type>[^<]+)</div>)?'
action='episodios'
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
# Cerca Info come anno o lingua nel Titolo
year = ''
lang = ''
if '(' in scrapedtitle:
year = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([0-9]+\))')
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([a-zA-Z]+\))')
# Rimuove Anno e Lingua nel Titolo
title = scrapedtitle.replace(year,'').replace(lang,'').strip()
original = scrapedoriginal.replace(year,'').replace(lang,'').strip()
# Compara Il Titolo con quello originale
if original == title:
original=''
else:
original = support.typo(scrapedoriginal,'-- []')
# cerca info supplementari
ep = ''
ep = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ep">(.*?)<')
if ep != '':
ep = ' - ' + ep
ova = ''
ova = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ova">(.*?)<')
if ova != '':
ova = ' - (' + ova + ')'
ona = ''
ona = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ona">(.*?)<')
if ona != '':
ona = ' - (' + ona + ')'
movie = ''
movie = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="movie">(.*?)<')
if movie != '':
movie = ' - (' + movie + ')'
special = ''
special = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="special">(.*?)<')
if special != '':
special = ' - (' + special + ')'
# Concatena le informazioni
lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else ''
info = ep + lang + year + ova + ona + movie + special
# Crea il title da visualizzare
long_title = '[B]' + title + '[/B]' + info + original
# Controlla se sono Episodi o Film
if movie == '':
contentType = 'tvshow'
action = 'episodios'
else:
contentType = 'movie'
action = 'findvideos'
itemlist.append(
Item(channel=item.channel,
contentType=contentType,
action=action,
title=long_title,
url=scrapedurl,
fulltitle=title,
show=title,
thumbnail=scrapedthumb,
context = autoplay.context))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Next page
support.nextPage(itemlist, item, data, r'<a\sclass="page-link"\shref="([^"]+)"\srel="next"\saria-label="Successiva')
return itemlist
# Controlla la lingua se assente
patronNext=r'<a href="([^"]+)" class="[^"]+" id="go-next'
typeContentDict={'movie':['movie', 'special']}
typeActionDict={'findvideos':['movie', 'special']}
def itemHook(item):
if not item.contentLanguage:
if 'dub=1' in item.url or item.l == 'dub':
item.contentLanguage = 'ITA'
item.title += support.typo(item.contentLanguage,'_ [] color kod')
else:
item.contentLanguage = 'Sub-ITA'
item.title += support.typo(item.contentLanguage,'_ [] color kod')
return item
return locals()
@support.scrape
def episodios(item):
    """Build the episode list for a series page.

    NOTE(review): this block appears to be two revisions merged by the diff
    rendering - everything after the first ``return itemlist`` is unreachable,
    including the trailing ``return locals()`` that the @support.scrape
    decorator normally consumes.  Confirm against the real file.
    """
    log()
    itemlist = []
    # HTML block that wraps the server/episode list on the page.
    patron_block = r'<div class="widget servers".*?>(.*?)<div id="download"'
    patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
    matches = support.match(item, patron, patron_block)[0]
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(
                channel=item.channel,
                action="findvideos",
                contentType="episode",
                title='[B] Episodio ' + scrapedtitle + '[/B]',
                url=urlparse.urljoin(host, scrapedurl),
                fulltitle=scrapedtitle,
                show=scrapedtitle,
                plot=item.plot,
                fanart=item.thumbnail,
                thumbnail=item.thumbnail))
    autorenumber.renumber(itemlist, item, 'bold')
    support.videolibrary(itemlist, item)
    return itemlist
    # --- unreachable from here down (see NOTE above) ---
    data = get_data(item)
    anime = True
    pagination = 50
    patronBlock = r'<div class="server\s*active\s*"(?P<block>.*?)(?:<div class="server|<link)'
    patron = r'<li[^>]*>\s*<a.*?href="(?P<url>[^"]+)"[^>]*>(?P<episode>[^-<]+)(?:-(?P<episode2>[^<]+))?'
    def itemHook(item):
        # Strip any "[...]" tag from the title to keep the bare episode number.
        item.number = support.re.sub(r'\[[^\]]+\]', '', item.title)
        item.title += support.typo(item.fulltitle, '-- bold')
        return item
    action = 'findvideos'
    return locals()
def findvideos(item):
    """Resolve playable video URLs for an episode across the listed server tabs.

    NOTE(review): the dump lost the original indentation; the nesting of the
    ``for ID, name`` loop inside the outer server loop is reconstructed and
    should be verified against the real file.
    """
    log()
    import time
    support.info(item)
    itemlist = []
    # Server tabs: (numeric server id, display name).
    matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
    videoData = ''
    for serverid, servername in matches:
        block = scrapertoolsV2.find_multiple_matches(data, 'data-id="' + serverid + '">(.*?)<div class="server')
        id = scrapertoolsV2.find_single_match(str(block), r'<a data-id="([^"]+)" data-base="' + item.fulltitle + '"')
        # Ajax endpoint returns JSON with a 'grabber' URL; ts defeats caching.
        dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
        json = jsontools.load(dataJson)
        log('JSON= ', json)
        urls = []
        # resp = support.match(get_data(item), headers=headers, patron=r'data-name="(\d+)">([^<]+)<')
        resp = support.match(get_data(item), headers=headers, patron=r'data-name="(\d+)">([^<]+)<')
        data = resp.data
        videoData += '\n' + json['grabber']
        if serverid == '28':
            # Server 28 hosts a direct stream: short-circuit and return it.
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="play",
                    title='diretto',
                    quality='',
                    url=json['grabber'],
                    server='directo',
                    show=item.show,
                    contentType=item.contentType,
                    folder=False))
            return support.server(item, videoData, itemlist)
        for ID, name in resp.matches:
            # Episode number may be embedded in the title as "<n> - ...".
            if not item.number: item.number = support.match(item.title, patron=r'(\d+) -').match
            match = support.match(data, patronBlock=r'data-name="' + ID + r'"[^>]+>(.*?)(?:<div class="(?:server|download)|link)', patron=r'data-id="([^"]+)" data-episode-num="' + (item.number if item.number else '1') + '"' + r'.*?href="([^"]+)"').match
            if match:
                epID, epurl = match
                # if 'vvvvid' in name.lower():
                #     urls.append(support.match(host + '/api/episode/ugly/serverPlayerAnimeWorld?id=' + epID, headers=headers, patron=r'<a.*?href="([^"]+)"', debug=True).match)
                if 'animeworld' in name.lower():
                    # Site-hosted file: use the alternative download link directly.
                    url = support.match(data, patron=r'href="([^"]+)"\s*id="alternativeDownloadLink"', headers=headers).match
                    title = support.match(url, patron=r'http[s]?://(?:www.)?([^.]+)', string=True).match
                    itemlist.append(item.clone(action="play", title=title, url=url, server='directo'))
                else:
                    dataJson = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).data
                    json = jsontools.load(dataJson)
                    title = support.match(json['grabber'], patron=r'server\d+.([^.]+)', string=True).match
                    if title: itemlist.append(item.clone(action="play", title=title, url=json['grabber'].split('=')[-1], server='directo'))
                    else: urls.append(json['grabber'])
    # support.info(urls)
    return support.server(item, urls, itemlist)

37
channels/aniplay.json Normal file
View File

@@ -0,0 +1,37 @@
{
"id": "aniplay",
"name": "AniPlay",
"active": true,
"language": ["ita", "sub-ita"],
"thumbnail": "aniplay.png",
"banner": "aniplay.png",
"categories": ["anime", "vos"],
"settings": [
{
"id": "sort",
"type": "list",
"label": "Ordine di Visualizzazione",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Popolarità", "Titolo", "Numero Episodi", "Data di inizio", "Data di fine", "Data di aggiunta"]
},
{
"id": "order",
"type": "bool",
"label": "Visualizza in ordine Discendente?",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "perpage",
"type": "list",
"label": "Numero di elementi per pagina",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": ["10", "20", "30", "40", "50", "60", "80", "90"]
}
]
}

331
channels/aniplay.py Normal file
View File

@@ -0,0 +1,331 @@
from platformcode import config, logger, autorenumber
from core import httptools, scrapertools, support, tmdb, jsontools
from inspect import stack
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
host = config.get_channel_url()
sort = ['views', 'title', 'episodeNumber', 'startDate', 'endDate', 'createdDate'][config.get_setting('sort', 'aniplay')]
order = 'asc' if config.get_setting('order', 'aniplay') else 'desc'
perpage = [10, 20, 30 ,40, 50, 60, 70, 80, 90][config.get_setting('perpage', 'aniplay')]
@support.menu
def mainlist(item):
    """Channel main menu; @support.menu builds the items from ``locals()``."""
    # Each tuple: (label, [url, action, args]).
    anime = ['/api/anime/advanced-search',
             ('A-Z', ['/api/anime/advanced-search', 'submenu_az', '']),
             ('Anno', ['', 'submenu_year', '']),
             ('Top', ['', 'submenu_top', '']),
             ('Ultimi aggiunti', ['', 'latest_added', ''])]
    return locals()
def submenu_az(item):
    """Build one alphabetical-index menu entry per initial character."""
    letters = ['0-9'] + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    return [
        item.clone(title=support.typo(letter, 'bold'),
                   url=host + '/api/anime/find-by-char',
                   action='peliculas',
                   variable='&character=' + letter,
                   thumbnail=support.thumb('az'))
        for letter in letters
    ]
def submenu_year(item):
    """List selectable years, newest first, down to the oldest known startDate."""
    itemlist = []
    from datetime import date
    current = date.today().year
    # Oldest year: ask the API for the single title with the smallest
    # startDate; subtract 1 so range() still includes that year.
    first = int(httptools.downloadpage('{}/api/anime/advanced-search?page=0&size=1&sort=startDate,asc&sort=id'.format(host)).json[0]['startDate'].split('-')[0]) - 1
    for year in range(current, first, -1):
        itemlist.append(item.clone(title=support.typo(year, 'bold'),
                                   action='submenu_season',
                                   variable=year,
                                   thumbnail=support.thumb('year')))
    return itemlist
def submenu_top(item):
    """Menu entries for the daily / weekly / monthly top charts."""
    links = {'Top del giorno': 'daily-top', 'Top della settimana': 'weekly-top', 'Top del mese': 'monthly-top'}
    return [
        item.clone(title=support.typo(caption, 'bold'),
                   action='submenu_top_of',
                   variable=endpoint)
        for caption, endpoint in links.items()
    ]
def submenu_season(item):
    """List the seasons available for the year stored in item.variable."""
    itemlist = []
    # API season id -> Italian display label.
    seasons = {'winter': 'Inverno', 'spring': 'Primavera', 'summer': 'Estate', 'fall': 'Autunno'}
    url = '{}/api/seasonal-view?page=0&size=36&years={}'.format(host, item.variable)
    js = httptools.downloadpage(url).json[0]['seasonalAnime']
    for season in js:
        # 'season' arrives as a dotted enum-like string; keep the last token.
        s = season['season'].split('.')[-1]
        title = seasons[s]
        itemlist.append(item.clone(title=title,
                                   url='{}/api/seasonal-view/{}-{}'.format(host, s, item.variable),
                                   thumbnail=support.thumb(s),
                                   action='peliculas',
                                   variable=''))
    return itemlist
def submenu_top_of(item):
    """List the entries of one top chart (item.variable holds the endpoint)."""
    itemlist = []
    url = '{}/api/home/{}'.format(host, item.variable)
    js = httptools.downloadpage(url).json
    for anime in js:
        fulltitle = anime['animeTitle']
        # Title is "Name (LANG)": split off the language tag.
        title = fulltitle.split('(')[0].strip()
        scrapedlang = scrapertools.find_single_match(fulltitle, r'\(([^\)]+)')
        lang = scrapedlang.upper() if scrapedlang else 'Sub-ITA'
        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')
        itemlist.append(item.clone(title=long_title,
                                   url='{}/anime/{}'.format(host, anime['animeId']),
                                   video_url='{}/api/anime/{}'.format(host, anime['animeId']),
                                   thumbnail=get_thumbnail(anime, 'animeHorizontalImages'),
                                   action='episodios',
                                   variable=anime['animeId']))
    return itemlist
def search(item, texto):
    """Global-search entry point: query the advanced-search API for *texto*.

    Returns an empty list on any scraping error so the global search across
    channels is never interrupted by this channel failing.
    """
    support.info(texto)
    item.url = host + '/api/anime/advanced-search'
    item.variable = '&query=' + texto
    try:
        return peliculas(item)
    # Keep searching on error (fix: was a bare ``except:`` which also
    # swallowed SystemExit/KeyboardInterrupt).
    except Exception:
        import sys
        for line in sys.exc_info():
            support.logger.error("%s" % line)
        return []
def newest(categoria):
    """'Novità' entry point: return the latest episodes for *categoria*.

    Only the "anime" category is handled; any other category - and any
    scraping error - yields an empty list so callers never receive None.
    """
    support.info(categoria)
    item = support.Item()
    try:
        if categoria == "anime":
            return latest_added(item)
        # Fix: previously fell through and implicitly returned None for
        # unhandled categories.
        return []
    # Keep searching on error (narrowed from a bare ``except:``).
    except Exception:
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
        return []
def latest_added(item):
    """Paged list of the most recently added episodes (home/latest-episodes)."""
    itemlist = []
    page = item.page if item.page else 0
    url = '{}/api/home/latest-episodes?page={}'.format(host, page)
    js = httptools.downloadpage(url).json
    for episode in js:
        title = episode['title'] if episode['title'] else ''
        animeTitle, lang = get_lang(episode['animeTitle'])
        quality = 'Full HD' if episode['fullHd'] else 'HD'
        # "<n>. [<episode title> - ]<series title> [lang] [quality]"
        # episodeNumber can be fractional in the API, hence int(float(...)).
        long_title = support.typo('{}. {}{}'.format(int(float(episode['episodeNumber'])), title + ' - ' if title else '', animeTitle), 'bold') + support.typo(lang, '_ [] color kod') + support.typo(quality, '_ [] color kod')
        image = get_thumbnail(episode, 'episodeImages')
        itemlist.append(item.clone(title=long_title,
                                   fulltitle=title,
                                   url='{}/play/{}'.format(host, episode['id']),
                                   contentType='episode',
                                   contentTitle=title,
                                   contentSerieName=animeTitle,
                                   contentLanguage=lang,
                                   quality=quality,
                                   contentEpisodeNumber=int(float(episode['episodeNumber'])),
                                   video_url='{}/api/episode/{}'.format(host, episode['id']),
                                   thumbnail=image,
                                   fanart=image,
                                   action='findvideos'))
    # No pagination item when called from newest() (global "Novità" view).
    if stack()[1][3] not in ['newest']:
        support.nextPage(itemlist, item.clone(page=page + 1))
    return itemlist
def peliculas(item):
    """Paged listing of titles from the JSON API endpoint in item.url.

    item.variable carries extra query parameters (search text, initial
    letter, ...); module-level ``sort``/``order``/``perpage`` come from the
    channel settings.
    """
    logger.debug()
    itemlist = []
    page = item.page if item.page else 0
    js = httptools.downloadpage('{}?page={}&size={}{}&sort={},{}&sort=id'.format(item.url, page, perpage, item.variable, sort, order)).json
    # Fix: the payload dump was inside the loop, serialising the WHOLE list
    # once per item (O(n^2) logging work). Dump it once instead.
    logger.debug(jsontools.dump(js))
    for it in js:
        title, lang = get_lang(it['title'])
        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')
        itemlist.append(item.clone(title=long_title,
                                   fulltitle=title,
                                   show=title,
                                   contentLanguage=lang,
                                   contentType='movie' if it['type'] == 'Movie' else 'tvshow',
                                   contentTitle=title,
                                   contentSerieName=title if it['type'] == 'Serie' else '',
                                   action='findvideos' if it['type'] == 'Movie' else 'episodios',
                                   plot=it['storyline'],
                                   url='{}/anime/{}'.format(host, it['id']),
                                   video_url='{}/api/anime/{}'.format(host, it.get('animeId', it.get('id'))),
                                   thumbnail=get_thumbnail(it),
                                   fanart=get_thumbnail(it, 'horizontalImages')))
    autorenumber.start(itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # A full page implies there may be more results.
    if len(itemlist) == perpage:
        support.nextPage(itemlist, item.clone(page=page + 1))
    return itemlist
def episodios(item):
    """List seasons/episodes for a series, handling the three API shapes:

    - a bare list of episodes,
    - an object with 'seasons' (one sub-item per season),
    - an object with 'episodes'.

    When invoked from the videolibrary/autorenumber machinery (detected via
    the caller's name on the stack) seasons are expanded to episodes in
    parallel.
    """
    logger.debug()
    itemlist = []
    if not item.video_url:
        item.video_url = item.url.replace('/anime/', '/api/anime/')
        # url = '{}/api/anime/{}'.format(host, item.id)
    json = httptools.downloadpage(item.video_url, CF=False).json
    if type(json) == list:
        item.show_renumber = False
        itemlist = list_episodes(item, json)
    elif json.get('seasons'):
        seasons = json['seasons']
        seasons.sort(key=lambda s: s['episodeStart'])
        for it in seasons:
            title = it['name']
            itemlist.append(item.clone(title=title,
                                       video_url='{}/api/anime/{}/season/{}'.format(host, it['animeId'], it['id']),
                                       contentType='season',
                                       action='list_episodes',
                                       plot=json['storyline'],
                                       year=it['yearStart'],
                                       show_renumber=True))
        # If the call comes from the videolibrary or autorenumber, show the
        # episodes themselves (fetched concurrently, one request per season).
        if stack()[1][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
            itlist = []
            with futures.ThreadPoolExecutor() as executor:
                eplist = []
                for ep in itemlist:
                    ep.show_renumber = False
                    eplist.append(executor.submit(list_episodes, ep))
                for res in futures.as_completed(eplist):
                    if res.result():
                        itlist.extend(res.result())
                itemlist = itlist
    elif json.get('episodes'):
        itemlist = list_episodes(item, json)
    # add renumber option
    if stack()[1][3] not in ['find_episodes'] and itemlist and itemlist[0].contentType == 'episode':
        autorenumber.start(itemlist, item)
    # add "add to videolibrary" menu
    if stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
        support.videolibrary(itemlist, item)
    return itemlist
def list_episodes(item, json=None):
    """Build episode items from *json* (fetched from item.video_url if None).

    Accepts either a bare episode list or an object with an 'episodes' key.
    """
    itemlist = []
    if not json:
        json = httptools.downloadpage(item.video_url, CF=False).json
    episodes = json['episodes'] if 'episodes' in json else json
    # episodeNumber is a string, possibly fractional ("12.5"): sort numerically.
    episodes.sort(key=lambda ep: int(ep['episodeNumber'].split('.')[0]))
    for it in episodes:
        quality = 'Full HD' if it['fullHd'] else 'HD'
        # "SxNN" when a season is known, plain "NN" otherwise.
        if item.contentSeason:
            episode = '{}x{:02d}'.format(item.contentSeason, int(it['episodeNumber'].split('.')[0]))
        else:
            episode = '{:02d}'.format(int(it['episodeNumber'].split('.')[0]))
        title = support.typo('{}. {}'.format(episode, it['title']), 'bold')
        image = get_thumbnail(it, 'episodeImages')
        itemlist.append(item.clone(title=title,
                                   url='{}/play/{}'.format(host, it['id']),
                                   video_url='{}/api/episode/{}'.format(host, it['id']),
                                   contentType='episode',
                                   contentEpisodeNumber=int(it['episodeNumber'].split('.')[0]),
                                   contentSeason=item.contentSeason if item.contentSeason else '',
                                   action='findvideos',
                                   quality=quality,
                                   thumbnail=image,
                                   fanart=image))
    # Renumber episodes only if shown in the menu
    if item.show_renumber:
        autorenumber.start(itemlist, item)
    return itemlist
def findvideos(item):
    """Resolve the direct stream for an episode/movie via the episode API."""
    logger.debug()
    res = httptools.downloadpage(item.video_url, CF=False).json
    # A movie arrives as a series object with a single episode: dereference it.
    if res.get('episodes', []):
        res = httptools.downloadpage('{}/api/episode/{}'.format(host, res['episodes'][0]['id'])).json
    item.url = res['videoUrl']
    item.server = 'directo'
    # HLS playlists need the manifest hint for the player.
    if '.m3u' in item.url:
        item.manifest = 'hls'
    return support.server(item, itemlist=[item])
def get_thumbnail(data, prop = 'verticalImages', key = 'full'):
    """Return the image URL stored under *prop* for the requested *key* size.

    Possible key values: small, full, blurred, medium.
    Possible prop values: verticalImages, animeHorizontalImages,
    animeVerticalImages, horizontalImages, episodeImages.

    Returns None when the property is missing/empty, '' when the first entry
    lacks the requested size.
    """
    images = data.get(prop, [])
    if not images:
        return None
    entry = images[0]
    if not entry:
        return None
    return entry.get('image' + key.capitalize(), '')
def get_lang(value):
    """Split an API title like "Name (ITA)" into (clean title, language).

    The language defaults to 'Sub-ITA' when no parenthesised tag is present.
    """
    # Fix: strip the trailing space left by the split before '(' - the same
    # normalisation submenu_top_of already applies.
    title = value.split('(')[0].strip() if value else ''
    scrapedlang = scrapertools.find_single_match(value, r'\(([^\)]+)')
    lang = scrapedlang.upper() if scrapedlang else 'Sub-ITA'
    return title, lang

View File

@@ -1,22 +0,0 @@
{
"id": "bleachportal",
"name": "BleachPortal",
"language": ["ita"],
"active": true,
"adult": false,
"fanart": "http://i39.tinypic.com/35ibvcx.jpg",
"thumbnail": "http://www.bleachportal.it/images/index_r1_c1.jpg",
"banner": "http://cgi.di.uoa.gr/~std05181/images/bleach.jpg",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-
# Ringraziamo Icarus crew
# ------------------------------------------------------------
# XBMC Plugin
# Canale per http://bleachportal.it
# ------------------------------------------------------------
import re
from core import scrapertools, httptools
from core.item import Item
from platformcode import logger
from platformcode import config
host = "http://www.bleachportal.it"
def mainlist(item):
    """Root menu: one episode-list entry per series hosted on the site."""
    logger.info("[BleachPortal.py]==> mainlist")
    shows = [
        ("[COLOR azure] Bleach [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
         host + "/streaming/bleach/stream_bleach.htm",
         "http://i45.tinypic.com/286xp3m.jpg",
         "http://i40.tinypic.com/5jsinb.jpg",
         "bleach"),
        ("[COLOR azure] D.Gray Man [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
         host + "/streaming/d.gray-man/stream_dgray-man.htm",
         "http://i59.tinypic.com/9is3tf.jpg",
         "http://wallpapercraft.net/wp-content/uploads/2016/11/Cool-D-Gray-Man-Background.jpg",
         "dgrayman"),
    ]
    return [Item(channel=item.channel,
                 action="episodi",
                 title=show_title,
                 url=show_url,
                 thumbnail=show_thumb,
                 fanart=show_fanart,
                 extra=show_extra)
            for show_title, show_url, show_thumb, show_fanart, show_extra in shows]
def episodi(item):
    """Scrape the episode table of a series page into playable items.

    NOTE(review): uses str.decode/encode, so this is Python-2-only code.
    """
    logger.info("[BleachPortal.py]==> episodi")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: episode number, episode title, relative link.
    patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">\D+([\d\-]+)\s?<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    animetitle = "Bleach" if item.extra == "bleach" else "D.Gray Man"
    for scrapednumber, scrapedtitle, scrapedurl in matches:
        # Page is latin-1 encoded; re-encode to UTF-8 (py2 byte strings).
        scrapedtitle = scrapedtitle.decode('latin1').encode('utf8')
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR azure]%s Ep: [COLOR deepskyblue]%s[/COLOR][/COLOR]" % (animetitle, scrapednumber),
                 url=item.url.replace("stream_bleach.htm", scrapedurl) if "stream_bleach.htm" in item.url else item.url.replace("stream_dgray-man.htm", scrapedurl),
                 plot=scrapedtitle,
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart,
                 fulltitle="[COLOR red]%s Ep: %s[/COLOR] | [COLOR deepskyblue]%s[/COLOR]" % (animetitle, scrapednumber, scrapedtitle)))
    # Bleach also has a dedicated OAV/movies page.
    if item.extra == "bleach":
        itemlist.append(
            Item(channel=item.channel,
                 action="oav",
                 title="[B][COLOR azure] OAV e Movies [/COLOR][/B]",
                 url=item.url.replace("stream_bleach.htm", "stream_bleach_movie_oav.htm"),
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))
    # The page lists newest first; show oldest first.
    return list(reversed(itemlist))
def oav(item):
    """Scrape the Bleach OAV/movies page into playable items."""
    logger.info("[BleachPortal.py]==> oav")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: entry name, description, relative link.
    patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">-\s+(.*?)<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapednumber, scrapedtitle, scrapedurl in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR deepskyblue] " + scrapednumber + " [/COLOR]",
                 url=item.url.replace("stream_bleach_movie_oav.htm", scrapedurl),
                 plot=scrapedtitle,
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fulltitle="[COLOR red]" + scrapednumber + "[/COLOR] | [COLOR deepskyblue]" + scrapedtitle + "[/COLOR]"))
    # The page lists newest first; show oldest first.
    return list(reversed(itemlist))
def findvideos(item):
    """Extract the direct video file from an episode page."""
    logger.info("[BleachPortal.py]==> findvideos")
    itemlist = []
    # Collapse accidental "<dir>//" doubled separators in the URL.
    if "bleach//" in item.url:
        item.url = re.sub(r'\w+//', "", item.url)
    data = httptools.downloadpage(item.url).data
    # Bleach pages embed the file in a JS 'file: "..."' field; D.Gray Man
    # pages use a 'file=...&' query parameter instead.
    if "bleach" in item.extra:
        video = scrapertools.find_single_match(data, 'file: "(.*?)",')
    else:
        video = scrapertools.find_single_match(data, 'file=(.*?)&').rsplit('/', 1)[-1]
    itemlist.append(
        Item(channel=item.channel,
             action="play",
             title="[[COLOR orange]Diretto[/COLOR]] [B]%s[/B]" % item.title,
             # Swap the page filename for the extracted video filename.
             url=item.url.replace(item.url.split("/")[-1], "/" + video),
             thumbnail=item.thumbnail,
             fulltitle=item.fulltitle))
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "bravoporn",
"name": "bravoporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.bravoporn.com/v/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,90 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://www.bravoporn.com'
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    sections = [
        ("Nuevas", "lista", host + "/latest-updates/"),
        ("Popular", "lista", host + "/most-popular/"),
        ("Categorias", "categorias", host + "/c/"),
    ]
    itemlist = [Item(channel=item.channel, title=label, action=handler, url=target)
                for label, handler, target in sections]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Global-search entry point: build the site search URL from *texto*.

    Returns an empty list on any error so the global search keeps running.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/s/?q=%s" % texto
    try:
        return lista(item)
    # Narrowed from a bare ``except:`` which also swallowed
    # SystemExit/KeyboardInterrupt.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """Scrape the category index into menu items (with per-category counts)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # Captures: category link, thumbnail, name, movie count.
    patron = '<a href="([^"]+)" class="th">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        # Thumbnails are protocol-relative on the page.
        scrapedthumbnail = "http:" + scrapedthumbnail
        scrapedurl = urlparse.urljoin(item.url, scrapedurl) + "/latest/"
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist
def lista(item):
    """Scrape a paged video listing and append a next-page item when present."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # Captures: video link, thumbnail, title, duration.
    patron = '<div class=".*?video_block"><a href="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?alt="([^"]+)".*?'
    patron += '<span class="time">([^"]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        # Thumbnails are protocol-relative on the page.
        thumbnail = "https:" + scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             fanart=thumbnail, plot=plot, contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next" title="Next">Next</a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
def play(item):
    """Extract the direct HQ MP4 source(s) from the video page."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    mp4_pattern = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />'
    return [Item(channel=item.channel, action="play", title=item.title, url=video_url)
            for video_url in scrapertools.find_multiple_matches(page, mp4_pattern)]

View File

@@ -1,15 +0,0 @@
{
"id": "camwhoresbay",
"name": "camwhoresbay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,115 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'https://www.camwhoresbay.com'
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    sections = [
        ("Nuevos", "lista", host + "/latest-updates/"),
        ("Mejor valorados", "lista", host + "/top-rated/"),
        ("Mas vistos", "lista", host + "/most-popular/"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=label, action=handler, url=target)
                for label, handler, target in sections]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Global-search entry point: build the site search URL from *texto*.

    Returns an empty list on any error so the global search keeps running.
    """
    logger.info()
    item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
    item.extra = texto
    try:
        return lista(item)
    # The exception is caught so a failing channel does not interrupt the
    # global search (narrowed from a bare ``except:``).
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """Scrape the category index, sorted alphabetically by title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: category link, name, thumbnail, video count.
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="thumb" src="([^"]+)".*?'
    patron += '<div class="videos">([^"]+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, scrapedthumbnail, cantidad in matches:
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedplot = ""
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return sorted(itemlist, key=lambda i: i.title)
def lista(item):
    """Scrape a paged video listing.

    Pagination differs between views: search results (item.extra set) page
    through an AJAX ``from_videos`` offset; plain listings use either a
    direct next-page link or an AJAX ``from`` offset.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: video link, title, lazy-loaded thumbnail, duration.
    patron = '<div class="video-item ">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
    patron += 'data-original="([^"]+)".*?'
    patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                             contentTitle=scrapedtitle, fanart=thumbnail))
    if item.extra:
        # Search-results pagination: AJAX offset in "from_videos".
        next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
        if next_page:
            if "from_videos=" in item.url:
                next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
            else:
                next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
                            "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
            itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    else:
        # Plain listing: prefer a direct next-page link...
        next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
        if next_page and not next_page.startswith("#"):
            next_page = urlparse.urljoin(item.url, next_page)
            itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
        else:
            # ...otherwise fall back to the AJAX "from" offset.
            next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
            if next_page:
                if "from" in item.url:
                    next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
                else:
                    next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
                        item.url, next_page)
                itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
def play(item):
    """Pick the best available stream quality.

    Preference order: video_alt_url3 > video_alt_url2 > video_alt_url >
    video_url (first non-empty match wins).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = ""
    for field in ('video_alt_url3', 'video_alt_url2', 'video_alt_url', 'video_url'):
        scrapedurl = scrapertools.find_single_match(data, field + ": '([^']+)'")
        if scrapedurl != "":
            break
    itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
                         thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
    return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "canalporno",
"name": "Canalporno",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
"banner": "canalporno.png",
"categories": [
"adult"
]
}

View File

@@ -1,88 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
from platformcode import config
host = "http://www.canalporno.com"
def mainlist(item):
    """Root menu of the channel."""
    logger.info()
    sections = [
        ("findvideos", "Útimos videos", host),
        ("categorias", "Listado Categorias", host + "/categorias"),
        ("search", "Buscar", host + "/search/?q=%s"),
    ]
    return [item.clone(action=handler, title=label, url=target)
            for handler, label, target in sections]
def search(item, texto):
    """Global-search entry point: substitute *texto* into the search URL.

    Returns results sorted by title, or an empty list on any error so the
    global search keeps running.
    """
    logger.info()
    try:
        item.url = item.url % texto
        itemlist = findvideos(item)
        return sorted(itemlist, key=lambda it: it.title)
    # Narrowed from a bare ``except:`` which also swallowed
    # SystemExit/KeyboardInterrupt.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def findvideos(item):
    """Scrape a video listing page plus its numbered pagination links."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: thumbnail, title, video link, duration (minutes).
    patron = '<img src="([^"]+)".*?alt="([^"]+)".*?<h2><a href="([^"]+)">.*?' \
             '<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for thumbnail, title, url, time in matches:
        scrapedtitle = time + " - " + title
        scrapedurl = host + url
        scrapedthumbnail = thumbnail
        itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail))
    # Page links after the currently selected one.
    patron = '<div class="paginacion">.*?<span class="selected">.*?<a href="([^"]+)">([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title in matches:
        url = host + url
        title = "Página %s" % title
        itemlist.append(item.clone(action="findvideos", title=title, url=url))
    return itemlist
def categorias(item):
    """Scrape the category list from the categories page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Restrict matching to the category <ul> block.
    bloque = scrapertools.find_single_match(data, '<ul class="ordenar-por ordenar-por-categoria">'
                                                  '(.*?)<\/ul>')
    # patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>'
    patron = "<li><a href='([^']+)'\s?title='([^']+)'>.*?<\/a><\/li>"
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title in matches:
        url = host + url
        # thumbnail = "http:" + thumbnail
        itemlist.append(item.clone(action="findvideos", title=title, url=url))
    return itemlist
def play(item):
    """Resolve the direct stream URL from the <source> tag of the video page."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    stream_url = scrapertools.find_single_match(page, '<source src="([^"]+)"')
    return [item.clone(url=stream_url, server="directo")]

View File

@@ -1,70 +1,11 @@
{
"id": "casacinema",
"name": "Casacinema",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
"categories": ["tvshow", "movie"],
"language": ["ita", "sub-ita"],
"active": false,
"thumbnail": "casacinema.png",
"banner": "casacinema.png",
"categories": ["tvshow", "movie","vos"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,340 +1,145 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Kodi on Demand - Kodi Addon
# Canale per casacinema
# Canale per 'casacinema'
# ------------------------------------------------------------
import re
import urlparse
from channelselector import thumb, get_thumb
from core import scrapertools, scrapertoolsV2, httptools, tmdb, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
__channel__ = "casacinema"
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'wstream', 'speedvideo']
list_quality = ['HD', 'SD']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'casacinema')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'casacinema')
headers = [['Referer', '%s/genere/serie-tv' % host]]
from core import support
host = support.config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
logger.info("kod.casacinema mainlist")
film = ['/category/film',
('Generi', ['', 'genres', 'genres']),
]
autoplay.init(item.channel, list_servers, list_quality)
tvshow = ['/category/serie-tv',
('Novità', ['/aggiornamenti-serie-tv', 'peliculas', '']),
]
itemlist = [Item(channel=item.channel,
title="[B]Film[/B]",
action="peliculas",
extra="movie",
url="%s/genere/film" % host),
Item(channel=item.channel,
title="[B]Film - HD[/B]",
action="peliculas",
extra="movie",
url="%s/?s=[HD]" % host),
Item(channel=item.channel,
title="[B] > Categorie[/B]",
action="categorias",
extra="movie",
url="%s/genere/film" % host),
Item(channel=item.channel,
title="[B]Film Sub - Ita[/B]",
action="peliculas",
extra="movie",
url="%s/genere/sub-ita" % host),
Item(channel=item.channel,
title="[COLOR blue]Cerca Film...[/COLOR]",
action="search",
extra="movie",),
Item(channel=item.channel,
title="[B]Serie TV[/B]",
extra="tvshow",
action="peliculas_tv",
url="%s/genere/serie-tv" % host),
Item(channel=item.channel,
title="[COLOR blue]Cerca Serie TV...[/COLOR]",
action="search",
extra="tvshow")]
search = ''
autoplay.show_option(item.channel, itemlist)
return locals()
# auto thumb
itemlist=thumb(itemlist)
return itemlist
@support.scrape
def genres(item):
action = 'peliculas'
blacklist = ['PRIME VISIONI', 'ULTIME SERIE TV', 'ULTIMI FILM']
patronMenu = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></li>'
patronBlock = r'<div class="container home-cats">(?P<block>.*?)<div class="clear">'
return locals()
def check(item):
item.data = support.match(item).data
if 'episodi e stagioni' in item.data.lower():
support.info('select = ### è una serie ###')
item.contentType = 'tvshow'
return episodios(item)
else:
support.info('select = ### è un film ###')
item.contentType = 'movie'
return findvideos(item)
def search(item, text):
support.info(text)
text = text.replace(' ', '+')
item.url = host + '/?a=b&s=' + text
item.args = 'search'
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
def newest(categoria):
logger.info("[casacinema.py] newest" + categoria)
itemlist = []
item = Item()
item = support.Item()
item.args = 'newest'
try:
if categoria == "film":
item.url = host + '/genere/film'
item.extra = "movie"
item.action = "peliculas"
itemlist = peliculas(item)
if categoria == 'series':
item.contentType = 'tvshow'
item.url = host+'/aggiornamenti-serie-tv'
if itemlist[-1].action == "peliculas":
itemlist.pop()
else:
item.contentType = 'movie'
item.url = host+'/category/film'
# Continua la ricerca in caso di errore
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
support.info("%s" % line)
return []
return itemlist
def search(item, texto):
logger.info("[casacinema.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
if item.extra == "tvshow":
return peliculas_tv(item)
if item.extra == "movie":
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
@support.scrape
def peliculas(item):
logger.info("kod.casacinema peliculas")
if item.contentType == 'movie':
action = 'findvideos'
elif item.contentType == 'tvshow':
action = 'episodios'
pagination = ''
else:
action = 'check'
itemlist = []
if item.args == 'newest':
patron = r'<li><a href="(?P<url>[^"]+)"[^=]+="(?P<thumb>[^"]+)"><div>\s*?<div[^>]+>(?P<title>[^\(\[<]+)(?:\[(?P<quality1>HD)\])?[ ]?(?:\(|\[)?(?P<lang>[sS]ub-[iI][tT][aA])?(?:\)|\])?[ ]?(?:\[(?P<quality>.+?)\])?[ ]?(?:\((?P<year>\d+)\))?<(?:[^>]+>.+?(?:title="Nuovi episodi">(?P<episode>\d+x\d+)[ ]?(?P<lang2>Sub-Ita)?|title="IMDb">(?P<rating>[^<]+)))?'
else:
patron = r'<li><a href="(?P<url>[^"]+)"[^=]+="(?P<thumb>[^"]+)"><div>\s*?<div[^>]+>(?P<title>[^\(\[<]+)(?P<title2>\([\D*]+\))?(?:\[(?P<quality1>HD)\])?\s?(?:[\(\[])?(?P<lang>[sS]ub-[iI][tT][aA])?(?:[\)\]])?\s?(?:\[(?P<quality>.+?)\])?\s?(?:\((?P<year>\d+)\))?(?:\(\D{2}\s\d{4}\))?<'
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
logger.info('DATA=' +data)
# Estrae i contenuti
patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
cleantitle = re.sub(r'[-]*\s*[Ii]l [Ff]ilm\s*[-]*?', '', title).strip()
cleantitle = cleantitle.replace('[HD]', '').strip()
year = scrapertools.find_single_match(title, r'\((\d{4})\)')
infolabels = {}
if year:
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
infolabels['year'] = year
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=title,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=cleantitle,
show=cleantitle,
plot=scrapedplot,
infoLabels=infolabels,
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## Paginación
next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)".*?>Pagina')
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail=get_thumb('next.png')))
return itemlist
patronNext = r'<a href="([^"]+)"\s*>Pagina'
def peliculas_tv(item):
logger.info("kod.casacinema peliculas")
def itemHook(item):
if item.quality1:
item.quality = item.quality1
item.title += support.typo(item.quality, '_ [] color kod')
if item.lang2:
item.contentLanguage = item.lang2
item.title += support.typo(item.lang2, '_ [] color kod')
if item.args == 'novita':
item.title = item.title
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
cleantitle = re.sub(r'[-]*\s*[Ss]erie [Tt]v\s*[-]*?', '', title).strip()
cleantitle = cleantitle.replace('[HD]', '').replace('[SD]', '').strip()
year = scrapertools.find_single_match(title, r'\((\d{4})\)')
infolabels = {}
if year:
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
infolabels['year'] = year
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tvshow",
title=title,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=cleantitle,
show=cleantitle,
plot=scrapedplot,
infoLabels=infolabels,
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## Paginación
next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Pagina') ### <- Regex rimosso spazio - precedente <li><a href="([^"]+)" >Pagina
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas_tv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail=get_thumb('next.png')))
return itemlist
def categorias(item):
logger.info("kod.casacinema categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, 'Categorie(.*?)</ul>')
# The categories are the options for the combo
patron = '<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
extra=item.extra,
url=urlparse.urljoin(host, scrapedurl)))
return itemlist
return item
return locals()
@support.scrape
def episodios(item):
def load_episodios(html, item, itemlist, lang_title):
patron = '.*?<a href="[^"]+"[^o]+ofollow[^>]+>[^<]+</a><(?:b|/)[^>]+>'
matches = re.compile(patron).findall(html)
for data in matches:
# Estrae i contenuti
scrapedtitle = scrapertoolsV2.htmlclean(re.sub(r'(<a [^>]+>)*(<\/a>.*)*(Speedvideo)*', '', data)).strip()
if scrapedtitle != 'Categorie':
scrapedtitle = scrapedtitle.replace('&#215;', 'x')
scrapedtitle = scrapedtitle.replace('×', 'x')
scrapedtitle = scrapedtitle.replace(';', '')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
url=data,
thumbnail=item.thumbnail,
extra=item.extra,
fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
show=item.show))
logger.info("[casacinema.py] episodios")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
data = scrapertools.decodeHtmlentities(data)
data = scrapertools.find_single_match(data, '<p>(?:<strong>|)(.*?)<div id="disqus_thread">')
lang_titles = []
starts = []
patron = r"Stagione.*?(?:ITA|\d+)"
matches = re.compile(patron, re.IGNORECASE).finditer(data)
for match in matches:
season_title = match.group()
if season_title != '':
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
starts.append(match.end())
i = 1
len_lang_titles = len(lang_titles)
while i <= len_lang_titles:
inizio = starts[i - 1]
fine = starts[i] if i < len_lang_titles else -1
html = data[inizio:fine]
lang_title = lang_titles[i - 1]
load_episodios(html, item, itemlist, lang_title)
i += 1
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios" + "###" + item.extra,
show=item.show))
return itemlist
if item.data:
data = item.data
action = 'findvideos'
item.contentType = 'tvshow'
blacklist = ['']
patron = r'"season-no">(?P<season>\d+)x(?P<episode>\d+)(?:[^>]+>){5}\s*(?P<title>[^<]+)(?P<data>.*?)</table>'
patronBlock = r'<span>(?:.+?Stagione*.+?(?P<lang>[Ii][Tt][Aa]|[Ss][Uu][Bb][\-]?[iI][tT][aA]))?.*?</span>.*?class="content(?P<block>.*?)(?:"accordion-item|<script>)'
return locals()
def findvideos(item):
logger.info("kod.casacinema findvideos")
data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url, headers=headers).data
html = httptools.downloadpage(data).data
patron = '"http:\/\/shrink-service\.it\/[^\/]+\/[^\/]+\/([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(html)
for url in matches:
if url is not None:
data = data
else:
continue
return support.server(item, data=data)
if item.contentType != 'movie':
links = support.match(item.data, patron=r'href="([^"]+)"').matches
else:
matchData = item.data if item.data else support.match(item.url, headers=headers).data
links = support.match(matchData, patron=r'data-id="([^"]+)"').matches
return support.server(item, links)

View File

@@ -1,36 +0,0 @@
{
"id": "casacinemaInfo",
"name": "La casa del cinema",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "",
"banner": "",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,151 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per casacinema
# ------------------------------------------------------------
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
__channel__ = "casacinemainfo"
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'wstream', 'speedvideo']
list_quality = ['1080p', '720', '480p', '360p']
checklinks = config.get_setting('checklinks', 'casacinema')
checklinks_number = config.get_setting('checklinks_number', 'casacinema')
def mainlist(item):
    """Build the channel root menu: film listings, category filters and search.

    Registers the channel with autoplay before and after building the
    entries, exactly as the framework expects.
    """
    logger.info("alfa.casacinema mainlist")
    autoplay.init(item.channel, list_servers, list_quality)

    entries = []
    entries.append(Item(channel=item.channel,
                        title="Film",
                        action="peliculas",
                        extra="movie",
                        url=host,
                        thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"))
    entries.append(Item(channel=item.channel,
                        title="In sala",
                        action="peliculas",
                        extra="movie",
                        url="%s/category/in-sala/" % host,
                        thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"))
    entries.append(Item(channel=item.channel,
                        title="Novità",
                        action="peliculas",
                        extra="movie",
                        url="%s/category/nuove-uscite/" % host,
                        thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"))
    entries.append(Item(channel=item.channel,
                        title="Sub - Ita",
                        action="peliculas",
                        extra="movie",
                        url="%s/category/sub-ita/" % host,
                        thumbnail="http://i.imgur.com/qUENzxl.png"))
    # Search entry deliberately carries no url: the framework calls
    # search(item, texto) which builds the query URL itself.
    entries.append(Item(channel=item.channel,
                        title="[COLOR yellow]Cerca...[/COLOR]",
                        action="search",
                        extra="movie",
                        thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    autoplay.show_option(item.channel, entries)
    return entries
def search(item, texto):
    """Query the site's WordPress search endpoint and list matching movies.

    `texto` is the raw user query; it is appended to the "?s=" query string
    as-is. Each result is scraped together with its year and plot, so TMDB
    enrichment can match more reliably than in `peliculas`.
    """
    logger.info("[casacinemaInfo.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    data = httptools.downloadpage(item.url).data
    itemlist = []
    # One result row per list item; captures url, title, thumb, year, plot.
    # NOTE(review): brittle — depends on the exact "Film dell'anno:" markup.
    patron = '<li class="col-md-12 itemlist">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?Film dell\\\'anno: ([0-9]{4}).*?<p class="text-list">([^<>]+)</p>'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches:
        title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        # Strip the language tag so fulltitle is clean for TMDB lookup.
        cleantitle = title.replace('[Sub-ITA]', '').strip()
        infoLabels = {"plot": scrapertoolsV2.decodeHtmlentities(scrapedplot), "year": scrapedyear}
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infoLabels,
                 fulltitle=cleantitle))
    # Enrich every scraped item with TMDB metadata in one batch call.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def peliculas(item):
    """List the movies found on `item.url` and append a next-page entry.

    Used by every film category of the channel; pagination follows the
    "glyphicon-chevron-right" link when present.
    """
    logger.info("[casacinemaInfo.py] peliculas")
    itemlist = []
    # Download the listing page.
    data = httptools.downloadpage(item.url).data
    # Extract one (url, title, thumbnail) triple per poster card.
    patron = '<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)"'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        # Strip the language tag so fulltitle is clean for TMDB lookup.
        cleantitle = title.replace('[Sub-ITA]', '').strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=cleantitle))
    # Enrich every scraped item with TMDB metadata in one batch call.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: follow the chevron-right link if the page has one.
    next_page = scrapertoolsV2.find_single_match(data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    return itemlist
def findvideos(item):
    """Resolve playable server links for a movie via the hdpass helper.

    Optionally verifies that links are alive (user setting) and hands the
    final list to autoplay before returning it.
    """
    logger.info("[casacinemaInfo.py] findvideos")
    servers = support.hdpass_get_servers(item)
    # User setting: probe the first N links and drop dead ones.
    if checklinks:
        servers = servertools.check_list_links(servers, checklinks_number)
    # Required so autoplay can auto-start the preferred server.
    autoplay.start(servers, item)
    return servers

View File

@@ -1,14 +0,0 @@
{
"id": "cat3plus",
"name": "Cat3plus",
"active": true,
"adult": true,
"language": [],
"thumbnail": "https://i.imgur.com/SJxXKa2.png",
"fanart": "https://i.imgur.com/ejCwTxT.jpg",
"banner": "https://i.imgur.com/bXUyk6m.png",
"categories": [
"movie",
"vo"
]
}

View File

@@ -1,129 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel SleazeMovies -*-
# -*- Created for Alfa-addon -*-
# -*- By Sculkurt -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.cat3plus.com/'
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host]
]
def mainlist(item):
    """Return the channel root menu: full listing, per-year browse, search."""
    logger.info()
    return [
        item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)),
        item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)),
        item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)),
    ]
def years(item):
    """Scrape the Blogger archive widget and list one entry per year."""
    logger.info()
    page = httptools.downloadpage(item.url, cookies=False).data
    # Collapse whitespace noise so the regex matches across line breaks.
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)
    year_links = scrapertools.find_multiple_matches(
        page, "<a dir='ltr' href='([^']+)'>([^<]+)</a>")
    return [item.clone(action='list_all', title=label, url=link)
            for link, label in year_links]
def get_source(url):
    """Download *url* and return its body with whitespace/&nbsp; collapsed."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", raw)
def list_all(item):
    """List every movie post on `item.url`, plus a next-page entry.

    Titles carry the production year in parentheses, which the pattern
    captures separately for TMDB matching.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    # Captures (url, title-before-paren, year, poster img) per post header.
    patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
    patron += 'src="([^"]+).*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, year, img in matches:
        itemlist.append(Item(channel = item.channel,
                             title = scrapedtitle,
                             url = scrapedurl,
                             action = "findvideos",
                             thumbnail = img,
                             contentTitle = scrapedtitle,
                             contentType = "movie",
                             infoLabels = {'year': year}))
    # Enrich every scraped item with TMDB metadata in one batch call.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    # Pagination: Blogger's "older posts" link marks the next page.
    next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'")
    if next_page != "":
        itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
    return itemlist
def search(item, texto):
    """Search handler: build the Blogger search URL and delegate to list_all.

    `texto` is the raw user query; spaces become '+' for the query string.
    Returns [] on any scraping error so a global search can continue with
    the remaining channels.
    """
    logger.info()
    if texto != "":
        texto = texto.replace(" ", "+")
        # `host` already ends with '/': strip it before appending the path,
        # otherwise the request URL contains a double slash ("...com//search").
        item.url = host.rstrip('/') + "/search?q=" + texto
        item.extra = "busqueda"
    try:
        return list_all(item)
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; scraping failures are logged and swallowed on purpose.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def findvideos(item):
    """Follow each external link on the movie page and collect video servers.

    Every outbound <h2><a ...> link is fetched (with the movie page as
    Referer) and scanned by servertools for recognizable video hosts.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        # NOTE: `data` is rebound per link; only the linked page is scanned.
        data = httptools.downloadpage(url, headers={'Referer': item.url}).data
        itemlist.extend(servertools.find_video_items(data=data))
    # Re-tag every detected server item with this channel/movie's identity.
    for video in itemlist:
        video.channel = item.channel
        video.contentTitle = item.contentTitle
        video.title = video.server.capitalize()
    # Offer "add to library" unless we were invoked from the library itself.
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel = item.channel,
                             title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url = item.url,
                             action = "add_pelicula_to_library",
                             extra = "findvideos",
                             contentTitle = item.contentTitle,
                             thumbnail = item.thumbnail
                             ))
    return itemlist

View File

@@ -1,36 +1,10 @@
{
"id": "cb01anime",
"name": "Cb01anime",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://i.imgur.com/bHoUMo2.png",
"banner": "http://i.imgur.com/bHoUMo2.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}
"id": "cb01anime",
"name": "Cb01anime",
"language": ["ita", "vos", "sub-ita"],
"active": false,
"thumbnail": "cb01anime.png",
"banner": "cb01anime.png",
"categories": ["anime"],
"settings": []
}

View File

@@ -1,276 +1,139 @@
# -*- coding: utf-8 -*-
# Ringraziamo Icarus crew
# ------------------------------------------------------------
# XBMC Plugin
# Canale per cineblog01 - anime
# ------------------------------------------------------------
import re
from core import httptools, scrapertools, servertools, tmdb
from core.item import Item
from platformcode import logger, config
from core import support
__channel__ = "cb01anime"
host = config.get_channel_url(__channel__)
#esclusione degli articoli 'di servizio'
blacklist = ['AVVISO IMPORTANTE CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA LINDIRIZZO UFFICIALE']
host = support.config.get_channel_url() + '/cb01-anime-cartoon'
# -----------------------------------------------------------------
Blacklist = ['AVVISO IMPORTANTE CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA LINDIRIZZO UFFICIALE','Lista Richieste Up &amp; Re-Up']
headers = [['Referer', host]]
@support.menu
def mainlist(item):
logger.info("[cb01anime.py] mainlist")
# Main options
itemlist = [Item(channel=item.channel,
action="list_titles",
title="[COLOR azure]Anime - Novita'[/COLOR]",
url=host + '/anime',
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=item.channel,
action="genere",
title="[COLOR azure]Anime - Per Genere[/COLOR]",
url=host + '/anime',
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/Genres.png"),
Item(channel=item.channel,
action="alfabetico",
title="[COLOR azure]Anime - Per Lettera A-Z[/COLOR]",
url=host + '/anime',
thumbnail="http://i.imgur.com/IjCmx5r.png"),
Item(channel=item.channel,
action="listacompleta",
title="[COLOR azure]Anime - Lista Completa[/COLOR]",
url="%s/anime/lista-completa-anime-cartoon/" % host,
thumbnail="http://i.imgur.com/IjCmx5r.png"),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca Anime[/COLOR]",
extra="anime",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
anime = [('Genere',['','menu', '2']),
('Per Lettera',['','menu', '1']),
('Per Anno',['','menu', '3']),
('Ultimi Anime Aggiornati',['','peliculas', 'newest'])]
return locals()
# =================================================================
# -----------------------------------------------------------------
def genere(item):
logger.info("[cb01anime.py] genere")
return build_itemlist(item, '<select name="select2"(.*?)</select>', '<option value="([^"]+)">([^<]+)</option>',
"list_titles")
@support.scrape
def menu(item):
blacklist = ['Anime per Genere', 'Anime per Anno', 'Anime per Lettera']
patronBlock = r'<select name="select%s"(?P<block>.*?)</select>' % item.args
patronMenu = r'<option value="(?P<url>[^"]+)">(?P<title>[^<]+)</option>'
action = 'peliculas'
def itemHook(item):
item.url = item.url.replace('cb01-anime/','cb01-anime-cartoon/')
return item
return locals()
def alfabetico(item):
logger.info("[cb01anime.py] alfabetico")
return build_itemlist(item, '<option value=\'-1\'>Anime per Lettera</option>(.*?)</select>',
'<option value="([^"]+)">\(([^<]+)\)</option>', "list_titles")
def listacompleta(item):
logger.info("[cb01anime.py] listacompleta")
return build_itemlist(item,
'<a href="#char_5a" title="Go to the letter Z">Z</a></span></div>(.*?)</ul></div><div style="clear:both;"></div></div>',
'<li><a href="' + host + '/([^"]+)"><span class="head">([^<]+)</span></a></li>', "episodios")
def build_itemlist(item, re_bloque, re_patron, iaction):
itemlist = []
data = httptools.downloadpage(item.url).data
# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, re_bloque)
# The categories are the options for the combo
matches = re.compile(re_patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
for url, titulo in matches:
itemlist.append(
Item(channel=item.channel,
action=iaction,
contentType="tvshow",
title=titulo,
fulltitle=titulo,
text_color="azure",
show=titulo,
url=host + url,
plot=""))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def search(item, texto):
logger.info("[cb01anime.py] " + item.url + " search " + texto)
item.url = host + "/anime/?s=" + texto
return list_titles(item)
# =================================================================
# -----------------------------------------------------------------
def list_titles(item):
logger.info("[cb01anime.py] mainlist")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patronvideos = r'<div class="span4">\s*<a href="([^"]+)">'
patronvideos += r'<img src="([^"]+)"[^>]+><\/a>[^>]+>[^>]+>'
patronvideos += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<\/a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
if not scrapedtitle in blacklist:
if 'lista richieste' in scrapedtitle.lower(): continue
patron = r'(?:\[[Ff][Uu][Ll]{2}\s*[Ii][Tt][Aa]\]|\[[Ss][Uu][Bb]\s*[Ii][Tt][Aa]\])'
cleantitle = re.sub(patron, '', scrapedtitle).strip()
## ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
## ------------------------------------------------
# Añade al listado de XBMC
itemlist.append(
Item(channel=item.channel,
action="listacompleta" if "Lista Alfabetica Completa Anime/Cartoon" in scrapedtitle else "episodios",
contentType="tvshow",
title=scrapedtitle,
fulltitle=cleantitle,
text_color="azure",
show=cleantitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
viewmode="movie_with_plot"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Put the next page mark
support.info(texto)
item.url = host + "/search/" + texto
try:
next_page = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)'")
itemlist.append(
Item(channel=item.channel,
action="list_titles",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return peliculas(item)
except:
pass
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
def newest(categoria):
support.info(categoria)
itemlist = []
item = support.Item()
try:
if categoria == "anime":
item.url = host
item.args = 'newest'
itemlist = peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
return itemlist
@support.scrape
def peliculas(item):
# debug=True
blacklist = Blacklist
item.contentType = 'tvshow'
if item.args == 'newest':
patron = r'<div id="blockvids">\s*<ul>\s*<li>\s*<a href="(?P<url>[^"]+)"[^>]+><img[^>]+src="(?P<thumb>[^"]+)"[^>]*>(?:[^>]+>){4}(?P<title>[^\[]+)\[(?P<lang>[^\]]+)\]'
else:
patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>(?:[^>]+>){7}\s*<h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br(?: /)?>)?\s*(?P<plot>[^<]+)'
patronNext = r'<link rel="next" href="([^"]+)"'
action = 'check'
return locals()
# =================================================================
def check(item):
# support.dbg()
item.url = support.match(item, patron=r'(?:<p>|/>)(.*?)(?:<br|</td>|</p>)', patronBlock=r'Streaming:(.*?)</tr>').matches
if 'Episodio' in str(item.url):
item.contentType = 'tvshow'
item.action ='episodios'
return episodios(item)
else:
item.contentType = 'movie'
item.action = 'findvideos'
return findvideos(item)
# -----------------------------------------------------------------
@support.scrape
def episodios(item):
logger.info("[cb01anime.py] episodios")
support.info('EPISODIOS ', item.data)
data = ''
matches = item.data
season = 1
s = 1
e = 0
sp = 0
itemlist = []
for match in item.url:
if 'stagione' in match.lower():
find_season = support.match(match, patron=r'Stagione\s*(\d+)').match
season = int(find_season) if find_season else season + 1 if 'prima' not in match.lower() else season
else:
try: title = support.match(match, patron=r'<a[^>]+>([^<]+)</a>').match
except: title = ''
if title:
if 'episodio' in title.lower():
ep = support.match(match, patron=r'Episodio ((?:\d+.\d|\d+|\D+))').match
check = ep.isdigit()
if check or '.' in ep:
if '.' in ep:
sp += 1
title = '0' + 'x' + str(sp).zfill(2) + ' - ' + title
else:
ep = int(ep)
if season > s and ep > 1:
s += 1
e = ep - 1
title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title
data += title + '|' + match + '\|'
else:
title += ' #movie'
data += title + '|' + match + '\|'
def itemHook(item):
if '#movie' in item.title:
item.contentType='movie'
item.title = item.title.replace(' #movie','')
return item
# Carica la pagina
data = httptools.downloadpage(item.url).data
# data = scrapertools.decodeHtmlentities(data)
patron = r'(?P<title>[^\|]+)\|(?P<url>[^\|]+)\|'
action = 'findvideos'
return locals()
patron1 = '(?:<p>|<td bgcolor="#ECEAE1">)<span class="txt_dow">(.*?)(?:</p>)?(?:\s*</span>)?\s*</td>'
patron2 = '<a.*?href="([^"]+)"[^>]*>([^<]+)</a>'
matches1 = re.compile(patron1, re.DOTALL).findall(data)
if len(matches1) > 0:
for match1 in re.split('<br />|<p>', matches1[0]):
if len(match1) > 0:
# Estrae i contenuti
titulo = None
scrapedurl = ''
matches2 = re.compile(patron2, re.DOTALL).finditer(match1)
for match2 in matches2:
if titulo is None:
titulo = match2.group(2)
scrapedurl += match2.group(1) + '#' + match2.group(2) + '|'
if titulo is not None:
title = item.title + " " + titulo
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=title,
extra=scrapedurl,
fulltitle=item.fulltitle,
show=item.show))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def findvideos(item):
logger.info("[cb01anime.py] findvideos")
itemlist = []
for match in item.extra.split(r'|'):
match_split = match.split(r'#')
scrapedurl = match_split[0]
if len(scrapedurl) > 0:
scrapedtitle = match_split[1]
title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
url=scrapedurl,
fulltitle=item.fulltitle,
show=item.show,
ontentType=item.contentType,
folder=False))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def play(item):
    # Resolve item.url into a list of playable video items via servertools.
    logger.info("[cb01anime.py] play")
    # '/goto/' redirect links carry the real target URL base64-encoded in the
    # last path segment. NOTE(review): str.decode('base64') is Python 2 only —
    # this line raises AttributeError on Python 3; confirm target interpreter.
    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')
    data = item.url
    logger.debug("##### Play data ##\n%s\n##" % data)
    # Let servertools detect known hoster links inside the URL text.
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Copy presentation metadata from the source item onto each result.
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
    return itemlist
return support.server(item, item.url)

View File

@@ -1,65 +1,10 @@
{
"id": "cineblog01",
"name": "CB01",
"language": ["ita"],
"language": ["ita", "sub-ita"],
"active": true,
"adult": false,
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": ["tvshow", "movie", "vosi"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Italiano"
]
}
]
}
"categories": ["tvshow", "movie", "vos", "documentary"],
"settings": []
}

View File

@@ -2,98 +2,88 @@
# ------------------------------------------------------------
# Canale per cineblog01
# ------------------------------------------------------------
import re
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from core.item import Item
from lib import unshortenit
from core import scrapertools, httptools, servertools, support
from platformcode import logger, config
from specials import autoplay
#impostati dinamicamente da getUrl()
host = ""
headers = ""
def findhost():
global host, headers
permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'wstream']
list_quality = ['HD', 'SD', 'default']
checklinks = config.get_setting('checklinks', 'cineblog01')
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
# esclusione degli articoli 'di servizio'
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217;INDIRIZZO UFFICIALE ',
'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬',
'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?']
def findhost(url):
host = httptools.downloadpage(url, follow_redirect=True).url
if host == 'https://cb01.uno/':
host = support.match(host, patron=r'<a href="([^"]+)').match
return host
host = config.get_channel_url(findhost)
headers = [['Referer', host]]
@support.menu
def mainlist(item):
findhost()
film = [
('HD', ['', 'menu', 'Film HD Streaming']),
('Genere', ['', 'menu', 'Film per Genere']),
('Anni', ['', 'menu', 'Film per Anno']),
('Popolari per Genere', ['', 'menu', 'Film Popolari']),
('Ultimi Aggiunti', ['/ultimi-100-film-aggiunti/', 'peliculas', 'newest']),
('Popolari', ['/category/film-popolari/']),
('Italiani', ['/category/nazione/italia/'])
# ('Film in Lista', ['/lista-film/', 'peliculas', 'newest'])
]
tvshow = ['/serietv/',
('Per Lettera', ['/serietv/', 'menu', 'Serie-TV x Lettera']),
('Per Genere', ['/serietv/', 'menu', 'Serie-TV x Genere']),
('Per anno', ['/serietv/', 'menu', 'Serie-TV x Anno']),
('Ultime Aggiunte', ['/serietv/ultime-100-serie-tv-aggiunte/', 'peliculas', 'newest'])
]
docu = [('Documentari {bullet bold}', ['/category/documentario/', 'peliculas']),
('HD {submenu} {documentari}', ['/category/hd-alta-definizione/documentario-hd/', 'peliculas'])
]
autoplay.init(item.channel, list_servers, list_quality)
# Main options
itemlist = []
support.menu(itemlist, 'Ultimi 100 Film Aggiornati bold', 'last', host + '/lista-film-ultimi-100-film-aggiornati/')
support.menu(itemlist, 'Film bold', 'peliculas', host)
support.menu(itemlist, 'HD submenu', 'menu', host, args="Film HD Streaming")
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host, args="Film per Anno")
support.menu(itemlist, 'Cerca film... submenu', 'search', host, args='film')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='tvshow')
support.menu(itemlist, 'Aggiornamenti serie tv', 'last', host + '/serietv/aggiornamento-quotidiano-serie-tv/', contentType='tvshow')
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Lettera")
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Anno")
support.menu(itemlist, 'Cerca serie... submenu', 'search', host + '/serietv/', contentType='tvshow', args='serie')
autoplay.show_option(item.channel, itemlist)
return itemlist
return locals()
@support.scrape
def menu(item):
findhost()
itemlist= []
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', '', data)
block = scrapertoolsV2.find_single_match(data, item.args + r'<span.*?><\/span>.*?<ul.*?>(.*?)<\/ul>')
support.log('MENU BLOCK= ',block)
patron = r'href="?([^">]+)"?>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(
channel=item.channel,
title=scrapedtitle,
contentType=item.contentType,
action='peliculas',
url=host + scrapedurl
)
)
return support.thumb(itemlist)
# debug = True
patronBlock = item.args + r'<span.*?><\/span>.*?<ul.*?>(?P<block>.*?)<\/ul>'
patronMenu = r'href="?(?P<url>[^">]+)"?[^>]+>(?P<title>[^<»]+)'
action = 'peliculas'
return locals()
def newest(categoria):
    # Global "newest" hook: build a synthetic Item pointing at the channel's
    # latest-content page and delegate to peliculas() for the scraping.
    support.info(categoria)
    item = support.Item()
    try:
        if categoria == "series":
            item.contentType = 'tvshow'
            item.url = host + '/serietv/'  # aggiornamento-quotidiano-serie-tv/'
        else:
            item.contentType = 'movie'
            item.url = host + '/ultimi-100-film-aggiunti/'
        item.args = "newest"
        return peliculas(item)
    # Keep the global "newest" search going on error: log and return empty.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
def search(item, text):
support.log(item.url, "search" ,text)
logger.info("search", text)
if item.contentType == 'tvshow': item.url = host + '/serietv'
else: item.url = host
try:
item.url = item.url + "/?s=" + text
item.url = item.url + "/search/" + text.replace(' ', '+')
return peliculas(item)
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -101,179 +91,137 @@ def search(item, text):
return []
def newest(categoria):
    """Scrape the 'last 100 added movies' list for the global news menu.

    `categoria` is accepted for interface compatibility with the other
    channels' newest() hooks; this implementation always lists movies.
    """
    findhost()  # refresh the module-level host before building the URL
    # Fix: removed the unused local 'itemlist' that was never populated.
    item = Item()
    item.contentType = 'movie'
    item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
    # Pattern captures url, title, optional [QUALITY] tag and (year).
    return support.scrape(item, r'<a href=([^>]+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
                          ['url', 'title', 'quality', 'year'],
                          patron_block=r'Ultimi 100 film aggiunti:.*?<\/td>')
def last(item):
    """List the 'recently updated' page with client-side pagination.

    Scrapes the whole remote list once, then slices it PERPAGE entries at a
    time driven by item.page; tvshow results are de-duplicated by URL, movie
    results get year/quality parsed from the trailing info text.
    """
    support.log()
    itemlist = []
    infoLabels = {}
    quality = ''
    PERPAGE = 20
    page = 1
    if item.page:
        page = item.page
    # Series and movies live in different markup blocks, hence two patterns.
    if item.contentType == 'tvshow':
        matches = support.match(item, r'<a href="([^">]+)".*?>([^(:(|[)]+)([^<]+)<\/a>', '<article class="sequex-post-content.*?</article>', headers)[0]
    else:
        matches = support.match(item, r'<a href=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
    for i, (url, title, info) in enumerate(matches):
        # Local pagination: skip entries before the current page window,
        # stop once the window is full.
        if (page - 1) * PERPAGE > i: continue
        if i >= page * PERPAGE: break
        add = True
        title = title.rstrip()
        if item.contentType == 'tvshow':
            # Fix: the dedup scan used to rebind the enumerate index 'i',
            # shadowing it inside the loop body; use a distinct name.
            for existing in itemlist:
                if existing.url == url:  # drop duplicate entries
                    add = False
        else:
            infoLabels['year'] = scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
            quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')
        if quality:
            longtitle = title + support.typo(quality, '_ [] color kod')
        else:
            longtitle = title
        if add:
            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos' if item.contentType == 'movie' else 'episodios',
                     contentType=item.contentType,
                     title=longtitle,
                     fulltitle=title,
                     show=title,
                     quality=quality,
                     url=url,
                     infoLabels=infoLabels
                     )
                )
    support.pagination(itemlist, item, page, PERPAGE)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
@support.scrape
def peliculas(item):
support.log()
if item.contentType == 'movie' or '/serietv/' not in item.url:
patron = r'<div class="?card-image"?>.*?<img src="?([^" ]+)"? alt.*?<a href="?([^" >]+)(?:\/|")>([^<[(]+)(?:\[([A-Za-z0-9/-]+)])? (?:\(([0-9]{4})\))?.*?<strong>([^<>&]+).*?DURATA ([0-9]+).*?<br(?: /)?>([^<>]+)'
listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
#debug = True
# esclusione degli articoli 'di servizio'
# curYear = datetime.date.today().year
# blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217;INDIRIZZO UFFICIALE ',
# 'Aggiornamento Quotidiano Serie TV', 'AVVISO!!!',
# 'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?',
# 'OSCAR ' + str(curYear) + ' &#x25b6; VOTA IL TUO FILM PREFERITO! &#x1f3ac;',
# 'Auguri di Buon Natale e Felice Anno Nuovo! &#8211; ' + str(curYear) + '!']
if 'newest' in item.args:
pagination = ''
patronBlock = r'sequex-page-left(?P<block>.*?)sequex-page-right'
if '/serietv/' not in item.url:
patron = r'src="?(?P<thumb>[^ "]+)"? alt="?(?P<title>.*?)(?:\[(?P<quality>[a-zA-Z]+(?:[/]?3D)?)\]\s*)?(?:\[(?P<lang>Sub-ITA|ITA)\]\s*)?(?:\[(?P<quality2>[a-zA-Z]+(?:[/]?3D)?)\]\s*)?\((?P<year>\d{4})[^\)]*\)[^>]*>.*?<a href=(?:")?(?P<url>[^" ]+)(?:")?.*?rpwe-summary[^>]*>(?P<genre>\w+) [^ ]+ DURATA (?P<duration>[0-9]+)[^ ]+ [^ ]+ [A-Z ]+ (?P<plot>[^<]+)<'
action = 'findvideos'
else:
patron = r'src=(?:")?(?P<thumb>[^ "]+)(?:")? alt=(?:")?(?P<title>.*?)(?: &#8211; \d+&#215;\d+)?(?:>|"| &#8211; )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>.*?<a href=(?:")?(?P<url>[^" ]+)(?:")?.*?rpwe-summary[^>]*>(?P<genre>[^\(]*)\((?P<year>\d{4})[^\)]*\) (?P<plot>[^<]+)<'
action = 'episodios'
elif '/serietv/' not in item.url:
patron = r'(?<!sticky )hentry.*?<div class="card-image">\s*<a[^>]+>\s*<img src="(?P<thumb>[^" ]+)" alt[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="?(?P<url>[^" >]+)(?:\/|"|\s+)>(?P<title>[^<[(]+)(?:\[(?P<quality>[a-zA-Z]+(?:[/]?3D)?)\]\s*)?(?:\[(?P<lang>Sub-ITA|ITA)\]\s*)?(?:\[(?P<quality2>[a-zA-Z/]+)\]\s*)? (?:\((?P<year>[0-9]{4})\))?[^>]+>[^>]+>[^>]+>[^>]+>(?P<genre>[^<>&‖“]+)(?:[^ ]+\s*DURATA\s*(?P<duration>[0-9]+)[^>]+>[^>]+>[^>]+>(?P<plot>[^<>]+))?'
action = 'findvideos'
else:
patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?</(?:p|div)>(.*?)</div'
listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
patron = r'(?<!sticky )hentry.*?card-image[^>]*>\s*<a href=(?:")?(?P<url>[^" >]+)(?:")?\s*>\s*<img src=(?:")?(?P<thumb>[^" ]+)(?:")? alt="(?P<title>.*?)(?: &#8211; \d+&#215;\d+)?(?:"| &#8211; )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>(?P<genre>[^\(]+)\((?P<year>\d{4})[^>]*>[^>]+>[^>]+>[^>]+>(?:<p>)?(?P<plot>[^<]+)'
action = 'episodios'
item.contentType = 'tvshow'
return support.scrape(item, patron_block=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)'],
patron=patron, listGroups=listGroups,
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">', blacklist=blacklist, action=action)
patronNext = '<a class="?page-link"? href="?([^>"]+)"?><i class="fa fa-angle-right">'
def itemHook(item):
if item.quality2:
item.quality = item.quality2
item.title += support.typo(item.quality2, '_ [] color kod')
return item
return locals()
@support.scrape
def episodios(item):
itemlist = []
@support.scrape
def folder(item, url):
"""
Quando c'è un link ad una cartella contenente più stagioni
"""
if url:
data = support.match(url).data
actLike = 'episodios'
addVideolibrary = False
downloadEnabled = False
data = httptools.downloadpage(item.url).data
matches = scrapertoolsV2.find_multiple_matches(data,
r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>')
patron = r'<tr><td>(?P<title>[^<]+)<td><span [^>].+?><a [^>]+href="(?P<url>[^"]+)[^>]+>'
sceneTitle = True
# debug = True
for match in matches:
support.log(match)
blocks = scrapertoolsV2.find_multiple_matches(match, '(?:<p>)(.*?)(?:</p>|<br)')
season = scrapertoolsV2.find_single_match(match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()
def itemHook(item):
item.serieFolder = True
return item
return locals()
for block in blocks:
episode = scrapertoolsV2.find_single_match(block, r'([0-9]+(?:&#215;|×)[0-9]+)').strip()
seasons_n = scrapertoolsV2.find_single_match(block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()
# debugBlock=True
data = support.match(item.url, headers=headers).data
folderItemlist = folder(item, scrapertools.find_single_match(data, r'TUTT[EA] L[EA] \w+\s+(?:&#8211;|-)\s+<a href="?([^" ]+)'))
if seasons_n:
season = seasons_n
patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:(?:DA)?\s*[0-9]+\s*A)?\s*[0-9]+|MINISSERIE)(?::\s*PARTE\s*[0-9]+)? - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>'
patron = r'(?:/>|<p>|<strong>)(?P<other>.*?(?P<episode>[0-9]+(?:&#215;|×)[0-9]+)\s*(?P<title2>.*?)?(?:\s*&#8211;|\s*-|\s*<).*?)(?:<\/p>|<br)'
def itemlistHook(itemlist):
title_dict = {}
itlist = []
for i in itemlist:
i.url = item.url
i.title = re.sub(r'\.(\D)',' \\1', i.title)
match = support.match(i.title, patron=r'(\d+.\d+)').match.replace('x','')
i.order = match
if match not in title_dict:
title_dict[match] = i
elif match in title_dict and i.contentLanguage == title_dict[match].contentLanguage \
or i.contentLanguage == 'ITA' and not title_dict[match].contentLanguage \
or title_dict[match].contentLanguage == 'ITA' and not i.contentLanguage:
title_dict[match].url = i.url
else:
title_dict[match + '1'] = i
if not episode: continue
for key, value in title_dict.items():
itlist.append(value)
season = re.sub(r'&#8211;|', "-", season)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title="[B]" + episode + "[/B] " + season,
fulltitle=episode + " " + season,
show=episode + " " + season,
url=block,
extra=item.extra,
thumbnail=item.thumbnail,
infoLabels=item.infoLabels
))
itlist = sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order)))
support.videolibrary(itemlist, item)
itlist.extend(folderItemlist)
return itemlist
return itlist
return locals()
def findvideos(item):
findhost()
if item.serieFolder:
return support.server(item, data=item.url)
if item.contentType == "episode":
return findvid_serie(item)
def load_links(itemlist, re_txt, color, desc_txt, quality=""):
streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '')
support.log('STREAMING=', streaming)
patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(streaming)
def load_links(itemlist, re_txt, desc_txt, quality=""):
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
logger.debug('STREAMING=', streaming)
matches = support.match(streaming, patron = r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
itemlist.append(
Item(channel=item.channel,
action="play",
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=quality,
contentType=item.contentType,
folder=False))
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, server=scrapedtitle, quality=quality))
support.log()
logger.debug()
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\t','',data)
# Extract the quality format
patronvideos = '>([^<]+)</strong></div>'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
QualityStr = ""
for match in matches:
QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]
data = re.sub('\n|\t', '', data)
# Estrae i contenuti - Streaming
load_links(itemlist, '<strong>Streaming:</strong>(.*?)<tableclass=cbtable height=30>', "orange", "Streaming", "SD")
load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "Streaming", "SD")
# Estrae i contenuti - Streaming HD
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "yellow", "Streaming HD", "HD")
load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "Streaming HD", "HD")
# Estrae i contenuti - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "pink", "Streaming 3D")
load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "Streaming 3D")
return support.server(item, itemlist=itemlist)
# Extract the quality format
patronvideos = r'([\w.]+)</strong></div></td>'
return support.server(item, itemlist=itemlist, patronTag=patronvideos)
# Estrae i contenuti - Download
# load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")
@@ -283,94 +231,12 @@ def findvideos(item):
def findvid_serie(item):
def load_vid_series(html, item, itemlist, blktxt):
logger.info('HTML' + html)
patron = '<a href="([^"]+)"[^=]+="_blank"[^>]+>(.*?)</a>'
# Estrae i contenuti
matches = re.compile(patron, re.DOTALL).finditer(html)
for match in matches:
scrapedurl = match.group(1)
scrapedtitle = match.group(2)
# title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
logger.debug()
data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:&#215;|×)[0-9]+[^<]+)', '', item.other)
support.log()
itemlist = []
lnkblk = []
lnkblkp = []
data = item.url
# First blocks of links
if data[0:data.find('<a')].find(':') > 0:
lnkblk.append(data[data.find(' - ') + 3:data[0:data.find('<a')].find(':') + 1])
lnkblkp.append(data.find(' - ') + 3)
else:
lnkblk.append(' ')
lnkblkp.append(data.find('<a'))
# Find new blocks of links
patron = r'<a\s[^>]+>[^<]+</a>([^<]+)'
matches = re.compile(patron, re.DOTALL).finditer(data)
for match in matches:
sep = match.group(1)
if sep != ' - ':
lnkblk.append(sep)
i = 0
if len(lnkblk) > 1:
for lb in lnkblk[1:]:
lnkblkp.append(data.find(lb, lnkblkp[i] + len(lnkblk[i])))
i = i + 1
for i in range(0, len(lnkblk)):
if i == len(lnkblk) - 1:
load_vid_series(data[lnkblkp[i]:], item, itemlist, lnkblk[i])
else:
load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i])
return support.server(item, itemlist=itemlist)
return support.server(item, data=data)
def play(item):
support.log()
itemlist = []
### Handling new cb01 wrapper
if host[9:] + "/film/" in item.url:
iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
support.log("/film/ wrapper: ", iurl)
if iurl:
item.url = iurl
if '/goto/' in item.url:
item.url = item.url.split('/goto/')[-1].decode('base64')
item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')
logger.debug("##############################################################")
if "go.php" in item.url:
data = httptools.downloadpage(item.url).data
if "window.location.href" in data:
try:
data = scrapertoolsV2.find_single_match(data, 'window.location.href = "([^"]+)";')
except IndexError:
data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
data, c = unshortenit.unwrap_30x_only(data)
else:
data = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
logger.debug("##### play go.php data ##\n%s\n##" % data)
else:
data = support.swzz_get_url(item)
return servertools.find_video_items(data=data)
logger.debug()
return servertools.find_video_items(item, data=item.url)

View File

@@ -2,85 +2,10 @@
"id": "cinemalibero",
"name": "Cinemalibero",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"categories": ["tvshow", "movie","anime"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://www.cinemalibero.fun/",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
"active": false,
"thumbnail": "cinemalibero.png",
"banner": "cinemalibero.png",
"categories": ["movie","tvshow","anime"],
"not_active": ["include_in_newest_anime", "include_in_newest_peliculas"],
"settings": []
}

View File

@@ -1,61 +1,192 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per CinemaLibero - First Version
# Canale per 'cinemaLibero'
# ------------------------------------------------------------
import re
from core import scrapertools, servertools, httptools, support
from core import tmdb
from core import httptools, support, scrapertools
from core.item import Item
from lib import unshortenit
from platformcode import config
from platformcode import logger
from specials import autoplay
import channelselector
from core.support import typo
from platformcode import config, logger
import sys
# Necessario per Autoplay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
list_quality = ['default']
from platformcode.logger import debug
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
# Necessario per Verifica Link
checklinks = config.get_setting('checklinks', 'cinemalibero')
checklinks_number = config.get_setting('checklinks_number', 'cinemalibero')
__channel__ = "cinemalibero"
host = config.get_channel_url(__channel__)
# rimanda a .today che contiene tutti link a .plus
# def findhost(url):
# permUrl = httptools.downloadpage('https://www.cinemalibero.online/', follow_redirects=False).headers
# try:
# import urlparse
# except:
# import urllib.parse as urlparse
# p = list(urlparse.urlparse(permUrl['location'].replace('https://www.google.com/search?q=site:', '')))
# if not p[0]:
# p[0] = 'https'
# return urlparse.urlunparse(p)
host = config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
logger.info('[cinemalibero.py] mainlist')
autoplay.init(item.channel, list_servers, list_quality) # Necessario per Autoplay
# Menu Principale
itemlist = []
support.menu(itemlist, 'Film bold', 'video', host+'/category/film/')
support.menu(itemlist, 'Generi submenu', 'genres', host)
support.menu(itemlist, 'Cerca film submenu', 'search', host)
support.menu(itemlist, 'Serie TV bold', 'video', host+'/category/serie-tv/', contentType='episode')
support.menu(itemlist, 'Anime submenu', 'video', host+'/category/anime-giapponesi/', contentType='episode')
support.menu(itemlist, 'Cerca serie submenu', 'search', host, contentType='episode')
support.menu(itemlist, 'Sport bold', 'video', host+'/category/sport/')
film = ['/category/film/',
('Novità', ['', 'peliculas', 'update']),
('Generi', ['', 'genres'])]
autoplay.show_option(item.channel, itemlist) # Necessario per Autoplay (Menu Configurazione)
tvshow = ['/category/serie-tv/']
support.channel_config(item, itemlist)
return itemlist
anime = ['/category/anime-giapponesi/']
## Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', 'sport', 'tvshow'])]
news = [('Ultimi episodi Serie/Anime', ['/aggiornamenti-serie-tv/', 'peliculas', 'update', 'tvshow'])]
search = ''
return locals()
@support.scrape
def peliculas(item):
# debug = True
action = 'check'
patronBlock = r'<div class="container">.*?class="col-md-12[^"]*?">(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
if item.args == 'newest':
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>.+?(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
pagination = 25
elif item.contentType == 'movie':
# action = 'findvideos'
patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>.+?)(?:[ ]\[(?P<lang>[sSuUbB\-iItTaA]+)\])?(?:[ ]\((?P<year>\d{4})?\))?"\s*alt="[^"]+"\s*class="[^"]+"(?: style="background-image: url\((?P<thumb>.+?)\)">)?\s*<div class="voto">[^>]+>[^>]+>.(?P<rating>[\d\.a-zA-Z\/]+)?[^>]+>[^>]+>[^>]+>(?:<div class="genere">(?P<quality>[^<]+)</div>)?'
if item.args == 'update':
patronBlock = r'<section id="slider">(?P<block>.*?)</section>'
patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})?))/".+?url\((?P<thumb>[^\)]+)\)">'
elif item.contentType == 'tvshow':
# action = 'episodios'
if item.args == 'update':
patron = r'<a href="(?P<url>[^"]+)"[^<]+?url\((?P<thumb>.+?)\)">\s*?<div class="titolo">(?P<title>.+?)(?: &#8211; Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>\s*?(?:<div class="genere">)?(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
pagination = 25
else:
patron = r'<a href="(?P<url>[^"]+)"\s*title="(?P<title>[^"\(]+)(?:"|\()(?:(?P<year>\d+)[^"]+)?.*?url\((?P<thumb>[^\)]+)\)(?:.*?<div class="voto">[^>]+>[^>]+>\s*(?P<rating>[^<]+))?.*?<div class="titolo">[^>]+>(?:<div class="genere">[^ ]*(?:\s\d+)?\s*(?:\()?(?P<lang>[^\)< ]+))?'
else:
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
def itemHook(item):
if 'sub' in item.contentLanguage.lower() and not 'ita' in item.contentLanguage.lower():
item.contentLanguage= 'Sub-ITA'
item.title = re.sub('[Ss]ub(?:-)?', item.contentLanguage, item.title)
if item.lang2:
if len(item.lang2)<3:
item.lang2 = 'ITA'
item.contentLanguage = item.lang2
item.title += support.typo(item.lang2, '_ [] color kod')
if item.args == 'update':
item.title = item.title.replace('-', ' ')
# if item.args == 'search':
# item.contentType = 'tvshow' if 'serie-' in item.url else 'movie'
return item
patronNext = r'<a class="next page-numbers".*?href="([^"]+)">'
return locals()
@support.scrape
def episodios(item):
data = item.data
# debug=True
if item.args == 'anime':
logger.debug("Anime :", item)
patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))|(?P<data>.+)'
patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)'
item.contentType = 'tvshow'
elif item.args == 'sport':
logger.debug("Sport :", item)
patron = r'(?:/>|<p>)\s*(?P<title>[^-]+)-(?P<data>.+?)(?:<br|</p)'
patronBlock = r'</strong>\s*</p>(?P<block>.*?</p>)'
item.contentType = 'tvshow'
elif item.args == 'serie' or item.contentType == 'tvshow':
logger.debug("Serie :", item)
patron = r'(?:/>|<p>)\s*(?:(?P<episode>\d+(?:x|×|&#215;)\d+|Puntata \d+)(?:-(?P<episode2>\d+))?[;]?[ ]?(?P<title>[^<-]+))?(?P<data>.*?)(?:<br|</p)'
patronBlock = r'Stagione\s(?:[Uu]nica)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:strong>|<div class="at-below)'
item.contentType = 'tvshow'
else:
patron = r'(?P<title>\s*[0-9]{2}/[0-9]{2}/[0-9]{4})(?P<data>.*?)(?:<br|</p)'
def itemHook(it):
if not scrapertools.find_single_match(it.title, r'(\d+x\d+)'):
it.title = re.sub(r'(\d+) -', '1x\\1', it.title)
return it
def itemlistHook(itl):
ret = []
if item.args == 'sport':
return itl
# support.dbg()
for it in itl:
ep = scrapertools.find_single_match(it.title, r'(\d+x\d+)')
if not ep and 'http' in it.data: # stagione intera
# from lib import unshortenit
# data = unshortenit.findlinks(it.data)
episodes = {}
def get_ep(s):
srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
if hasattr(srv_mod, 'get_filename'):
title = srv_mod.get_filename(s.url)
if item.args == 'anime':
ep = title
else:
ep = scrapertools.get_season_and_episode(title)
if ep:
if ep not in episodes:
episodes[ep] = []
episodes[ep].append(s)
servers = support.server(item, it.data, CheckLinks=False, Download=False, Videolibrary=False)
# for s in servers:
# get_ep(s)
# ottengo l'episodio dal nome del file
with futures.ThreadPoolExecutor() as executor:
for s in servers:
executor.submit(get_ep, s)
# logger.debug(it.contentLanguage)
if item.args != 'anime':
for ep in episodes:
ret.append(it.clone(title=typo(ep, 'bold') + typo(it.contentLanguage, '_ [] color kod bold'),
servers=[srv.tourl() for srv in episodes[ep]], contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1])))
else:
ret.extend([it.clone(title=typo(ep, 'bold') + typo(it.contentLanguage, '_ [] color kod bold'),
servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes])
elif ep:
ret.append(it)
return sorted(ret, key=lambda i: i.title)
return locals()
@support.scrape
def genres(item):
    # Genre menu. The @support.scrape decorator consumes the locals()
    # returned below (action / patron_block / patronMenu), so the variable
    # names themselves are part of the contract — do not rename them.
    action='peliculas'
    # Block: the genre dropdown container; menu pattern: one entry per link.
    patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>'
    patronMenu=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
    return locals()
def search(item, texto):
logger.info("[cinemalibero.py] " + item.url + " search " + texto)
logger.debug(item.url,texto)
texto = texto.replace(' ', '+')
item.url = host + "/?s=" + texto
# item.contentType = 'tv'
item.args = 'search'
try:
return video(item)
# Continua la ricerca in caso di errore
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -63,264 +194,99 @@ def search(item, texto):
return []
def genres(item):
    """List the genre entries of the site's dropdown menu.

    Delegates the scraping to support.scrape; every matched entry
    becomes an item whose action is 'video'.
    """
    block_re = r'<div id="bordobar" class="dropdown-menu(.*?)</li>'
    entry_re = r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"'
    groups = ['url', 'title']
    return support.scrape(item, patron_block=block_re, patron=entry_re,
                          listGroups=groups, action='video')
def video(item):
logger.info('[cinemalibero.py] video')
def newest(categoria):
logger.debug('newest ->', categoria)
itemlist = []
item = Item()
item.args = 'newest'
try:
if categoria == 'series' or categoria == 'anime':
item.args = 'update'
item.url = host+'/aggiornamenti-serie-tv/'
item.contentType = 'tvshow'
item.action = 'peliculas'
itemlist = peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error('newest log: ', (line))
return []
if host not in item.url:
item.url = host + item.url
# Carica la pagina
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12[^"]*?">(.*?)<div class=(?:"container"|"bg-dark ")>')
# Estrae i contenuti
matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block)
for match in matches:
url = scrapertools.find_single_match(match, r'href="([^"]+)"')
long_title = scrapertools.find_single_match(match, r'<div class="titolo">([^<]+)<\/div>')
thumb = scrapertools.find_single_match(match, r'url=\((.*?)\)')
quality = scrapertools.find_single_match(match, r'<div class="voto">([^<]+)<\/div>')
genere = scrapertools.find_single_match(match, r'<div class="genere">([^<]+)<\/div>')
year = scrapertools.find_single_match(long_title, r'\(([0-9)]+)') or scrapertools.find_single_match(long_title, r'\) ([0-9)]+)')
lang = scrapertools.find_single_match(long_title, r'\(([a-zA-Z)]+)')
title = re.sub(r'\(.*','',long_title)
title = re.sub(r'(?:\(|\))','',title)
if genere:
genere = ' - [' + genere + ']'
if year:
long_title = title + ' - ('+ year + ')' + genere
if lang:
long_title = '[B]' + title + '[/B]' + ' - ('+ lang + ')' + genere
else:
long_title = '[B]' + title + '[/B]'
# Seleziona fra Serie TV e Film
if item.contentType == 'movie':
tipologia = 'movie'
action = 'findvideos'
elif item.contentType == 'episode':
tipologia = 'tv'
action = 'episodios'
else:
tipologia = 'movie'
action = 'select'
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
title=long_title,
fulltitle=title,
quality=quality,
url=url,
thumbnail=thumb,
infoLabels={'year': year},
show=title))
# Next page
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">')
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='video',
title='[B]' + config.get_localized_string(30992) + ' &raquo;[/B]',
url=next_page,
contentType=item.contentType,
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def select(item):
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
logger.info('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
extra='serie',
contentType='episode'))
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE):
logger.info('select = ### è un anime ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
extra='anime',
contentType='episode'))
def check(item):
data = support.match(item.url, headers=headers).data
if data:
ck = str(support.match(data, patronBlock=r'Genere:(.*?)</span>', patron=r'tag">([^<]+)').matches).lower()
if 'serie tv' in ck or 'anime' in ck or 'wrestling wwe' in ck :# in ['serie tv', 'wrestling wwe', 'anime']:
if 'anime' in ck:
item.args = 'anime'
elif 'sport' in ck or 'wrestling' in ck:
item.args = 'sport'
else:
item.args = 'serie'
item.contentType = 'tvshow'
item.data = data
itemlist = episodios(item)
if not itemlist:
item.data = data
return findvideos(item)
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
item.contentType = 'movie'
item.data = data
# item.action = 'findvideos'
return findvideos(item)
return itemlist
def findvideos(item):
def filter_ep(s):
srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
if hasattr(srv_mod, 'get_filename'):
title = srv_mod.get_filename(s.url)
# support.dbg()
if scrapertools.get_season_and_episode(title) == str(item.contentSeason) + "x" + str(
item.contentEpisodeNumber).zfill(2):
servers.append(s)
logger.debug()
# support.dbg()
if item.servers:
return support.server(item, itemlist=[Item().fromurl(s) for s in item.servers])
if not item.data:
item.data = httptools.downloadpage(item.url)
data = scrapertools.find_single_match(item.data, '<div class="at-above-post addthis_tool"(.*?)(?:<div class="at-below-post|[dD][oO][wW][nN][lL][oO][aA][dD])')
if data:
item.data = data
servers = []
# if item.args == 'anime':
# if item.urls: # this is a episode
# return support.server(item, itemlist=[Item(url=support.unshortenit.FileCrypt().unshorten(u)) for u in item.urls])
# itemlist = []
# episodes = {}
# for uri in support.unshortenit.FileCrypt().find(item.data):
# for ep in support.unshortenit.FileCrypt(uri).list_files():
# ep = ('.'.join(ep[0].split('.')[:-1]), ep[1]) # remove extension
# if not ep[0] in episodes:
# episodes[ep[0]] = []
# episodes[ep[0]].append(ep[1])
# for ep in episodes.keys():
# itemlist.append(item.clone(title=ep, urls=episodes[ep], action='findvideos', data=''))
# return itemlist
total_servers = support.server(item, data=item.data)
if item.contentType == 'episode' and len(set([srv.server for srv in total_servers])) < len([srv.server for srv in total_servers]):
# i link contengono più puntate, cerco quindi quella selezionata
with futures.ThreadPoolExecutor() as executor:
for s in total_servers:
if s.server:
executor.submit(filter_ep, s)
else:
servers.append(s)
return servers
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
def findvideos(item): # Questa def. deve sempre essere nominata findvideos
logger.info('[cinemalibero.py] findvideos')
itemlist = []
if item.args == 'direct':
return servertools.find_video_items(item)
if item.contentType == 'episode':
data = item.url.lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>')
urls = re.findall('<a.*?href="([^"]+)"', block, re.DOTALL)
else:
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub(r'\n|\t','',data).lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>(.*?)<strong>')
urls = re.findall('<a href="([^"]+)".*?class="external"', block, re.DOTALL)
logger.info('URLS'+ str(urls))
if urls:
data =''
for url in urls:
url, c = unshortenit.unshorten(url)
data += url + '\n'
logger.info('DATA'+ data)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle + ' - [COLOR limegreen][[/COLOR]'+videoitem.title+' [COLOR limegreen]][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
# Link Aggiungi alla Libreria
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
itemlist.append(
Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle))
# Necessario per filtrare i Link
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Necessario per FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Necessario per AutoPlay
autoplay.start(itemlist, item)
return itemlist
def episodios(item): # Questa def. deve sempre essere nominata episodios
logger.info('[cinemalibero.py] episodios')
itemlist = []
extra = ''
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)at-below-post')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
# logger.info('select = ### è una serie ###')
extra='serie'
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodi', block, re.IGNORECASE):
# logger.info('select = ### è un anime ###')
extra='anime'
block = re.sub(r'<h2>.*?<\/h2>','',block)
block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('<','<').replace('-<','<').replace('&#8211;<','<').replace('&#8211; <','<').replace('<strong>','<stop><start><strong>')+'<stop>'
block = re.sub(r'stagione completa.*?<\/p>','',block,flags=re.IGNORECASE)
if extra == 'serie':
block = block.replace('<br /> <a','<a')
matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block)
if not matches:
matches = scrapertools.find_multiple_matches(block, r'<a href="([^"]+)"[^>]+>(Episodio [0-9]+)</a>')
for scrapedurl, scrapedtitle in matches:
scrapedtitle = re.sub(r'Episodio ([0-9]+)', r'Episodio 1x\1', scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=item.fulltitle,
url=scrapedurl,
args='direct'))
else:
for lang, html in matches:
lang = re.sub('<.*?>','',lang)
html = html.replace('<br />','\n').replace('</p>', '\n')
matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
for scrapedtitle, html in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle + ' - (' + lang + ')',
fulltitle=scrapedtitle,
show=item.fulltitle,
url=html))
elif extra == 'anime':
block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block)
block = re.sub(r'(?:mirror|Mirror)[^<]+<','',block)
block = block.replace('<br />','\n').replace('/a></p>','\n')
block = re.sub(r'<start.*?(?:download|Download).*?\n','',block)
matches = re.compile('<a(.*?)\n', re.DOTALL).findall(block)
for html in matches:
scrapedtitle = scrapertools.find_single_match(html, r'>(.*?)<\/a>')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=item.fulltitle,
url=html))
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
show=item.fulltitle,
contentType='movie'))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
return total_servers

View File

@@ -1,71 +0,0 @@
{
"id": "cinemastreaming",
"name": "Cinemastreaming",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
"banner": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
"categories": ["tvshow", "movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,191 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per cinemastreaming
# ------------------------------------------------------------
import re
from core import scrapertools, httptools, scrapertoolsV2, support
from core.item import Item
from specials import autoplay
from platformcode import config
__channel__ = "cinemastreaming"
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango']
list_quality = ['1080p', '1080p 3D', 'SD', 'CAM', 'default']
headers = [['Referer', host]]
def mainlist(item):
support.log()
# Menu Principale
itemlist = []
support.menu(itemlist, 'Film bold', 'peliculas', host + '/film/')
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
support.menu(itemlist, 'Anime bold', 'peliculas', host + '/category/anime/')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serie-tv/', contentType='episode')
support.menu(itemlist, 'Ultime Uscite submenu', 'peliculas', host + "/stagioni/", "episode", args='latests')
support.menu(itemlist, 'Ultimi Episodi submenu', 'peliculas_latest_ep', host + "/episodi/", "episode", args='lateste')
support.menu(itemlist, '[COLOR blue]Cerca...[/COLOR]', 'search')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def peliculas(item):
    """List movies/shows from an archive page.

    The scraping regex is assembled incrementally depending on
    item.args and item.contentType, then handed to support.scrape.
    """
    support.log()
    # Capture-group names, in the order the groups appear in `patron`.
    list_groups = ["url", "thumb", "title", "year", "rating", "duration"]
    # Common prefix: post link, thumbnail and title of each <article>.
    patron = r'<article.*?"TPost C".*?href="([^"]+)".*?img.*?src="([^"]+)".*?<h3.*?>([^<]+).*?Year">'
    if item.args == "latests":
        # NOTE(review): this branch yields only 4 capture groups while
        # list_groups names 6 -- confirm support.scrape tolerates that.
        patron += r'([^<]+)'
    else:
        # Full card: year, rating and duration badges.
        patron += r'(\d{4}).*?AAIco-star.*?>([^<]+).*?AAIco-access_time">([^<]+).*?Qlty'
    patron_next = r'page-numbers current.*?href="([^"]+)"'
    if item.contentType == "movie":
        # Movies additionally capture the quality string.
        patron += r'\">([^<]+)'
        list_groups.append("quality")
    # Movies resolve links directly; everything else lists episodes first.
    action = "findvideos" if item.contentType == "movie" else "episodios"
    return support.scrape(item, patron, list_groups, patronNext=patron_next, action=action)
def peliculas_latest_ep(item):
    """List the latest released episodes (one item per episode)."""
    # Each match: post url, thumbnail, episode number, episode label,
    # show title.
    patron = r'<article.*?"TPost C".*?href="([^"]+)".*?img.*?src="([^"]+)"'
    patron += r'.*?class="ClB">([^<]+)<\/span>([^<]+).*?<h3.*?>([^<]+)'
    data = httptools.downloadpage(item.url).data
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for scrapedurl, scrapedthumbnail, scrapednum, scrapedep, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,
                 title="[B]" + scrapednum + "[/B]" + scrapedep + " - " + scrapedtitle,
                 fulltitle=scrapedep + " " + scrapedtitle,
                 show=scrapedep + " " + scrapedtitle,
                 url=scrapedurl,
                 extra=item.extra,
                 # NOTE(review): assumes protocol-relative thumbnail URLs
                 # ("//host/...") -- confirm against the live markup.
                 thumbnail="http:" + scrapedthumbnail,
                 infoLabels=item.infoLabels
                 ))
    # Pagination: "current" page number followed by the next page href.
    support.nextPage(itemlist, item, data, r'page-numbers current.*?href="([^"]+)"')
    return itemlist
def peliculas_menu(item):
    """Genre listing: same as peliculas() minus its trailing entry
    (presumably the next-page link -- it is always dropped)."""
    listing = peliculas(item)
    return listing[:-1]
def episodios(item):
    """Scrape a show's episode table and prefix every title with the
    season/episode tag extracted from the episode URL."""
    episode_re = r'<td class="MvTbTtl"><a href="([^"]+)">(.*?)<\/a>.*?>\d{4}<'
    groups = ["url", "title", "year"]
    episodes = support.scrape(item, episode_re, groups)
    for episode in episodes:
        prefix = scrapertools.get_season_and_episode(episode.url) + " - "
        episode.title = prefix + episode.title
        episode.fulltitle = prefix + episode.fulltitle
    return episodes
def menu(item):
    """Build the category submenu, excluding the "Anime" entry (it has
    its own top-level menu item)."""
    return support.scrape(
        item,
        r'menu-category-list"><a href="([^"]+)">([^<]+)<',
        ["url", "title"],
        blacklist="Anime",
        action="peliculas_menu",
        patron_block=r'<ul class="sub-menu">.*?</ul>')
def search(item, texto):
    """Global-search entry point: query the site and reuse peliculas().

    Any failure is logged with a stack trace and an empty list is
    returned so the aggregated search does not abort.
    """
    support.log("s=", texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    # Keep the global search alive on errors
    except Exception, e:  # Python 2 syntax; this file predates Python 3
        import traceback
        traceback.print_stack()
        support.log(str(e))
        return []
def newest(categoria):
    """Return the newest items for the global 'Novità' menus.

    Only the 'series' category is handled here; any other category
    falls through and returns the empty list built at the top.
    """
    support.log("newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/episodi/"
            item.action = "peliculas"
            item.args = "lateste"
            item.contentType = "episode"
            itemlist = peliculas(item)
            # Drop the trailing pagination entry, if present.
            if itemlist[-1].action == "peliculas":
                itemlist.pop()
    # Keep the listing alive on errors
    except Exception, e:  # Python 2 syntax; this file predates Python 3
        import traceback
        traceback.print_stack()
        support.log(str(e))
        return []
    return itemlist
def findvideos(item):
    """Resolve the playable links of a movie/episode page.

    Shows whose 'quality' field actually carries an airing status are
    redirected to the episode list instead of being resolved here.
    """
    if item.quality.lower() in ["ended", "canceled", "returning series"]:
        return episodios(item)
    itemlist = []
    data = scrapertoolsV2.decodeHtmlentities(httptools.downloadpage(item.url).data)
    # Player buttons: (server name, quality/language label) pairs.
    btns = re.compile(r'data-tplayernv="Opt.*?><span>([^<]+)</span><span>([^<]+)</span>', re.DOTALL).findall(data)
    # Embedded players; each one is fetched to extract the real iframe URL.
    matches = re.compile(r'<iframe.*?src="([^"]+trembed=[^"]+)', re.DOTALL).findall(data)
    for i, scrapedurl in enumerate(matches):
        scrapedurl = scrapertoolsV2.decodeHtmlentities(scrapedurl)
        patron = r'<iframe.*?src="([^"]+)"'
        link_data = httptools.downloadpage(scrapedurl).data
        url = scrapertoolsV2.find_single_match(link_data, patron)
        # NOTE(review): assumes btns and matches have the same length;
        # btns[i] raises IndexError otherwise -- confirm on live pages.
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 contentType=item.contentType,
                 title="[B]" + btns[i][0] + "[/B] - " + btns[i][1],
                 fulltitle=btns[i][0] + " " + btns[i][1],
                 show=btns[i][0] + " " + btns[i][1],
                 url=url,
                 extra=item.extra,
                 infoLabels=item.infoLabels,
                 server=btns[i][0],
                 contentQuality=btns[i][1].replace('Italiano - ', ''),
                 ))
    if item.contentType == "movie":
        support.videolibrary(itemlist, item)
    autoplay.start(itemlist, item)
    return itemlist

View File

@@ -1,36 +1,11 @@
{
"id": "cinetecadibologna",
"name": "Cinetecadibologna",
"name": "Cineteca di Bologna",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
"banner": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
"thumbnail": "cinetecadibologna.png",
"banner": "cinetecadibologna.png",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
"not_active":["include_in_newest_peliculas", "include_in_newest_series", "include_in_newest_anime", "include_in_global_search"],
"settings": []
}

View File

@@ -1,155 +1,74 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per cinetecadibologna
# ------------------------------------------------------------
import re
import urlparse
from core import httptools, scrapertools
from core.item import Item
from platformcode import logger, config
host = "http://cinestore.cinetecadibologna.it"
from core import support
host = support.config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
logger.info("kod.cinetecadibologna mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Elenco Film - Cineteca di Bologna[/COLOR]",
action="peliculas",
url="%s/video/alfabetico_completo" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
Item(channel=item.channel,
title="[COLOR azure]Epoche - Cineteca di Bologna[/COLOR]",
action="epoche",
url="%s/video/epoche" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
Item(channel=item.channel,
title="[COLOR azure]Percorsi Tematici - Cineteca di Bologna[/COLOR]",
action="percorsi",
url="%s/video/percorsi" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif")]
return itemlist
film = ['/video/alfabetico_completo',
('Anni',['/video/epoche', 'menu']),
('Registi',['/video/registi', 'menu']),
('Attori',['/video/attori', 'menu']),
('Percorsi Tematici',['/video/percorsi','menu'])]
return locals()
# Sub-menu scraper for years/directors/actors/thematic paths.
# NOTE(review): the @support.scrape decorator appears to consume this
# function's locals() as configuration -- confirm in core.support.
@support.scrape
def menu(item):
    action = 'peliculas'
    if 'epoche' in item.url:
        # Decades page: plain <li><a> entries.
        patronMenu =r'<li>\s*<a href="(?P<url>[^"]+)">(?P<title>[^>]+)<'
    elif 'percorsi' in item.url:
        # Thematic paths: cover image plus title.
        patron = r'<div class="cover_percorso">\s*<a href="(?P<url>[^"]+)">\s*<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>(?P<title>.*?)<'
    else:
        # Directors/actors: heading links (the URL is cut at the comma).
        patron = r'<h2>\s*<a href="(?P<url>[^,"]+),[^"]+"\s*>(?P<title>[^<]+)<'
        patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc'
    return locals()
def search(item, text):
    """Site-wide search; results are rendered through peliculas()."""
    support.info(text)
    item.args = 'noorder'
    item.url = host + '/ricerca/type_ALL/ricerca_' + text
    item.contentType = 'movie'
    try:
        return peliculas(item)
    # Keep the global search alive on errors
    except:
        import sys
        for line in sys.exc_info():
            support.logger.error("%s" % line)
        return []
@support.scrape
def peliculas(item):
logger.info("kod.cinetecadibologna peliculas")
itemlist = []
if 'alfabetico' in item.url:
patron = r'<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?:\[)?(?P<title>[^\]<]+)(?:\]|<)'
else:
if 'type_ALL' in item.url: patronBlock = r'Video:(?P<block>.*?)(?:<div class=""|<!--)'
elif not 'NomePersona' in item.url: patronBlock = r'<h3>Film</h3>(?P<block>.*?)<div class="list_wrapper'
patron = r'<a href="(?P<url>[^"]+)"\s*class="[^"]+"\s*title="(?:\[)?(?P<title>[^\]"]+)(?:\])?"\s*rel="(?P<thumb>[^"]+)"'
patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc'
return locals()
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = '<img src="([^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="([^"]+)"[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = host + scrapedthumbnail
scrapedurl = host + scrapedurl
if not "/video/" in scrapedurl:
continue
html = scrapertools.cache_page(scrapedurl)
start = html.find("Sinossi:")
end = html.find('<div class="sx_col">', start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
folder=True))
# Paginazione
patronvideos = '<div class="footerList clearfix">\s*<div class="sx">\s*[^>]+>[^g]+gina[^>]+>\s*[^>]+>\s*<div class="dx">\s*<a href="(.*?)">pagina suc'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url= scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def epoche(item):
    """List the 'Epoche' (decades) categories of the archive."""
    logger.info("kod.cinetecadibologna categorias")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Narrow search by selecting only the combo
    bloque = scrapertools.find_single_match(data, '<h1 class="pagetitle">Epoche</h1>(.*?)</ul>')
    # The categories are the options for the combo
    patron = '<a href="([^"]+)">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        scrapedurl = host + scrapedurl
        scrapedplot = ""
        # Titles like "'50" become "Anni '50".
        if scrapedtitle.startswith(("'")):
            scrapedtitle = scrapedtitle.replace("'", "Anni '")
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://www.cinetecadibologna.it/pics/cinema-ritrovato-alcinema.png",
                 plot=scrapedplot))
    return itemlist
def percorsi(item):
    """List the 'Percorsi Tematici' (thematic path) categories."""
    logger.info("kod.cinetecadibologna categorias")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Each path: relative url, cover image, title.
    patron = '<div class="cover_percorso">\s*<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>\s*[^>]+>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # Both url and thumbnail are site-relative.
        scrapedurl = host + scrapedurl
        scrapedplot = ""
        scrapedthumbnail = host + scrapedthumbnail
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))
    return itemlist
def findvideos(item):
logger.info("kod.cinetecadibologna findvideos")
support.info()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
matches = support.match(item, patron=r'filename: "(.*?)"').matches
patron = 'filename: "(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
itemlist.append(item.clone(action="play", title=support.config.get_localized_string(30137), server='directo', url=host + url))
for video in matches:
video = host + video
itemlist.append(
Item(
channel=item.channel,
action="play",
title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
url=video,
folder=False))
return itemlist
return support.server(item, itemlist=itemlist)

View File

@@ -1,12 +0,0 @@
{
"id": "cinetemagay",
"name": "Cinetemagay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cinetemagay.png",
"banner": "cinetemagay.png",
"categories": [
"adult"
]
}

View File

@@ -1,127 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from core import httptools
from core import servertools
from core.item import Item
from platformcode import config, logger
IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay')
def strip_tags(value):
    """Return *value* with every HTML/XML tag removed."""
    tag_pattern = re.compile(r'<[^>]*?>')
    return tag_pattern.sub('', value)
def mainlist(item):
    """Main menu: one entry per blog source."""
    logger.info()
    # (title, feed url, thumbnail) per source; every feed is requested
    # 100 posts at a time starting from index 1.
    sources = [
        ("Cine gay latinoamericano",
         "http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg"),
        ("Cine y cortos gay",
         "http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg"),
        ("Cine gay online (México)",
         "http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ"),
        ("Sentido gay",
         "http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1",
         "http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg"),
        ("PGPA",
         "http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
         "http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4"),
    ]
    return [Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumb)
            for title, url, thumb in sources]
def lista(item):
    """List the posts of a Blogspot Atom feed page, plus a next-page link."""
    logger.info()
    itemlist = []
    # Download the feed page
    data = httptools.downloadpage(item.url).data
    # Extract the entries: thumbnail, raw entry body, post URL, title.
    patronvideos = '&lt;img .*?src=&quot;(.*?)&quot;'
    patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        scrapedtitle = match[3]
        scrapedtitle = scrapedtitle.replace("&apos;", "'")
        # NOTE(review): the next three also map unrelated entities to an
        # apostrophe -- looks intentional but sloppy; confirm.
        scrapedtitle = scrapedtitle.replace("&quot;", "'")
        scrapedtitle = scrapedtitle.replace("&amp;amp;", "'")
        scrapedtitle = scrapedtitle.replace("&amp;#39;", "'")
        scrapedurl = match[2]
        scrapedthumbnail = match[0]
        imagen = ""
        scrapedplot = match[1]
        tipo = match[1]
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        # Turn the escaped entry body into readable plain text for the plot.
        scrapedplot = "<" + scrapedplot
        scrapedplot = scrapedplot.replace("&gt;", ">")
        scrapedplot = scrapedplot.replace("&lt;", "<")
        scrapedplot = scrapedplot.replace("</div>", "\n")
        scrapedplot = scrapedplot.replace("<br />", "\n")
        scrapedplot = scrapedplot.replace("&amp;", "")
        scrapedplot = scrapedplot.replace("nbsp;", "")
        scrapedplot = strip_tags(scrapedplot)
        itemlist.append(
            Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                 plot=scrapedurl + scrapedplot, folder=True))
    # Next page: bump the feed's start-index by the 100-post page size.
    variable = item.url.split("index=")[1]
    variable = int(variable)
    variable += 100
    variable = str(variable)
    variable_url = item.url.split("index=")[0]
    url_nueva = variable_url + "index=" + variable
    itemlist.append(
        Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")",
             url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva))
    return itemlist
def detail(item):
    """Open a post, extract its synopsis and the recognised video links."""
    logger.info()
    itemlist = []
    # Download the post page
    data = httptools.downloadpage(item.url).data
    # Undo common URL percent-escapes so embedded links become detectable.
    data = data.replace("%3A", ":")
    data = data.replace("%2F", "/")
    data = data.replace("%3D", "=")
    # NOTE(review): "%3" -> "?" looks like a typo for "%3F" -- confirm.
    data = data.replace("%3", "?")
    data = data.replace("%26", "&")
    descripcion = ""
    plot = ""
    # NOTE(review): the lazy group at the end of this pattern matches the
    # empty string, so the captured synopsis is always empty -- confirm
    # whether a real terminator was intended.
    patrondescrip = 'SINOPSIS:(.*?)'
    matches = re.compile(patrondescrip, re.DOTALL).findall(data)
    if len(matches) > 0:
        # Clean the synopsis of markup and whitespace noise.
        descripcion = matches[0]
        descripcion = descripcion.replace("&nbsp;", "")
        descripcion = descripcion.replace("<br/>", "")
        descripcion = descripcion.replace("\r", "")
        descripcion = descripcion.replace("\n", " ")
        descripcion = descripcion.replace("\t", " ")
        descripcion = re.sub("<[^>]+>", " ", descripcion)
        descripcion = descripcion
        try:
            # Python 2: re-encode the synopsis for display.
            plot = unicode(descripcion, "utf-8").encode("iso-8859-1")
        except:
            plot = descripcion
    # Find the server-hosted video links inside the page.
    video_itemlist = servertools.find_video_items(data=data)
    for video_item in video_itemlist:
        itemlist.append(Item(channel=item.channel, action="play", server=video_item.server,
                             title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail,
                             plot=video_item.url, folder=False))
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "cliphunter",
"name": "cliphunter",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.cliphunter.com/gfx/new/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,109 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'https://www.cliphunter.com'
def mainlist(item):
    """Main menu: static site sections plus the search entry."""
    logger.info()
    sections = [
        ("Nuevas", "lista", host + "/categories/All"),
        ("Popular", "lista", host + "/popular/ratings/yesterday"),
        ("Pornstars", "catalogo", host + "/pornstars/"),
        ("Categorias", "categorias", host + "/categories/"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in sections]
    # The search entry carries no URL; the query is built in search().
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search entry point: build the query URL and delegate to lista().

    On any scraping error, log the exception info and return an empty list
    so the UI degrades gracefully.
    """
    logger.info()
    item.url = host + "/search/%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
def catalogo(item):
    """List pornstar entries plus an optional 'next page' link."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    entry_re = re.compile(
        '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>',
        re.DOTALL)
    for link, thumb, name in entry_re.findall(data):
        # Each pornstar page lists her movies under /movies.
        full_url = urlparse.urljoin(item.url, link) + "/movies"
        itemlist.append(Item(channel=item.channel, action="lista", title=name,
                             url=full_url, fanart=thumb, thumbnail=thumb, plot=""))
    next_page = scrapertools.find_single_match(
        data, '<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="catalogo",
                             title="Página Siguiente >>", text_color="blue",
                             url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def categorias(item):
    """List the site's categories (title, link, thumbnail).

    Fix: removed the dead no-op assignment `scrapedtitle = scrapedtitle`.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle,
                             url=scrapedurl, fanart=scrapedthumbnail,
                             thumbnail=scrapedthumbnail, plot=""))
    return itemlist
def lista(item):
    """List videos on the current page and append a pagination entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    video_re = re.compile(
        '<img class=".*?" src="([^"]+)".*?<div class="tr">(.*?)</div>.*?'
        '<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>', re.DOTALL)
    for thumb, duration, link, name in video_re.findall(data):
        # Duration is shown in yellow in front of the title.
        label = "[COLOR yellow]%s[/COLOR] %s" % (duration, name)
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             url=urlparse.urljoin(item.url, link),
                             thumbnail=thumb, plot="", fanart=thumb,
                             contentTitle=label, infoLabels={'year': ""}))
    next_page = scrapertools.find_single_match(
        data, '<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
    if next_page:
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Extract direct stream URLs from the video page's JSON-ish payload.

    Fix: removed the unused local `title = scrapedurl` (dead code).
    Returns one playable Item per "url" entry found, served as "directo".
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '"url"\:"(.*?)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        # URLs come JSON-escaped ("\/"); normalize the slashes.
        scrapedurl = scrapedurl.replace("\/", "/")
        itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                             fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot,
                             show=item.title, server="directo"))
    return itemlist

View File

@@ -1,15 +0,0 @@
{
"id": "coomelonitas",
"name": "Coomelonitas",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.coomelonitas.com/wp-content/themes/3xTheme/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,66 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config
host ='http://www.coomelonitas.com'
def mainlist(item):
    """Build the root menu of the coomelonitas channel."""
    logger.info()
    menu = [
        ("Peliculas", "lista"),
        ("Categorias", "categorias"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host)
                for title, action in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search entry point: build the query URL and delegate to lista().

    Any scraping failure is logged and mapped to an empty result list.
    """
    logger.info()
    item.url = host + "/?s=%s" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
def categorias(item):
    """List WordPress category links from the sidebar widget.

    Fix: added the `logger.info()` trace call that every other function in
    this channel starts with (it was missing here).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle,
                             url=scrapedurl, thumbnail="", plot=""))
    return itemlist
def lista(item):
    """List movie entries from the index page plus a pagination link."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    for block in re.compile('<div class="all"(.*?)</div>', re.DOTALL).findall(data):
        # Each "all" block holds one movie card; pull its fields separately.
        name = scrapertools.find_single_match(block, 'title="([^"]+)"')
        link = scrapertools.find_single_match(block, '<a href="([^"]+)"')
        summary = scrapertools.find_single_match(block, '<p class="summary">(.*?)</p>')
        thumb = scrapertools.find_single_match(block, '<img src="([^"]+)"')
        itemlist.append(Item(channel=item.channel, action="findvideos", title=name,
                             fulltitle=name, url=link, fanart=thumb,
                             thumbnail=thumb, plot=summary, viewmode="movie"))
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="siguiente">')
    if next_page:
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue", url=next_page))
    return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "cumlouder",
"name": "Cumlouder",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cumlouder.png",
"banner": "cumlouder.png",
"categories": [
"adult"
]
}

View File

@@ -1,211 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
    """Build the root menu of the cumlouder channel.

    Also resets the per-channel "url_error" flag so get_data() retries a
    direct connection on a fresh session.
    """
    logger.info()
    config.set_setting("url_error", False, "cumlouder")
    entries = [
        ("Últimos videos", "videos", "https://www.cumlouder.com/"),
        ("Categorias", "categorias", "https://www.cumlouder.com/categories/"),
        ("Pornstars", "pornstars_list", "https://www.cumlouder.com/girls/"),
        ("Listas", "series", "https://www.cumlouder.com/series/"),
        ("Buscar", "search", "https://www.cumlouder.com/search?q=%s"),
    ]
    return [item.clone(title=title, action=action, url=url)
            for title, action, url in entries]
def search(item, texto):
    """Fill the %s placeholder of item.url with the query and run videos().

    Failures are logged with a traceback and mapped to an empty list.
    """
    logger.info()
    item.url = item.url % texto
    item.action = "videos"
    try:
        return videos(item)
    except:
        import traceback
        logger.error(traceback.format_exc())
        return []
def pornstars_list(item):
    """Return one A-Z index entry per initial letter of the girls index."""
    logger.info()
    return [item.clone(title=letter.upper(),
                       url=urlparse.urljoin(item.url, letter),
                       action="pornstars")
            for letter in "abcdefghijklmnopqrstuvwxyz"]
def pornstars(item):
    """List pornstars for one letter page, with video count and pagination."""
    logger.info()
    itemlist = []
    page = get_data(item.url)
    girl_re = re.compile(
        '<a girl-url="[^"]+" class="[^"]+" href="([^"]+)" title="([^"]+)">[^<]+'
        '<img class="thumb" src="([^"]+)" [^<]+<h2[^<]+<span[^<]+</span[^<]+</h2[^<]+'
        '<span[^<]+<span[^<]+<span[^<]+</span>([^<]+)</span>', re.DOTALL)
    for link, name, thumb, vid_count in girl_re.findall(page):
        if "go.php?" in link:
            # The proxy wraps outbound links in a go.php redirector; unwrap both.
            link = urllib.unquote(link.split("/go.php?u=")[1].split("&")[0])
            thumb = urllib.unquote(thumb.split("/go.php?u=")[1].split("&")[0])
        else:
            link = urlparse.urljoin(item.url, link)
            if not thumb.startswith("https"):
                thumb = "https:%s" % thumb
        itemlist.append(item.clone(title="%s (%s)" % (name, vid_count),
                                   url=link, action="videos", thumbnail=thumb))
    # Pagination
    next_links = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>',
                            re.DOTALL).findall(page)
    if next_links:
        if "go.php?" in next_links[0]:
            next_url = urllib.unquote(next_links[0].split("/go.php?u=")[1].split("&")[0])
        else:
            next_url = urlparse.urljoin(item.url, next_links[0])
        itemlist.append(item.clone(title="Pagina Siguiente", url=next_url))
    return itemlist
def categorias(item):
    """List category tiles with their video counts, plus pagination."""
    logger.info()
    itemlist = []
    page = get_data(item.url)
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)
    tag_re = re.compile(
        '<a tag-url=.*?href="([^"]+)" title="([^"]+)".*?<img class="thumb" '
        'src="([^"]+)".*?<span class="cantidad">([^<]+)</span>', re.DOTALL)
    for link, name, thumb, vid_count in tag_re.findall(page):
        if "go.php?" in link:
            # Unwrap the proxy's go.php redirector from link and thumbnail.
            link = urllib.unquote(link.split("/go.php?u=")[1].split("&")[0])
            thumb = urllib.unquote(thumb.split("/go.php?u=")[1].split("&")[0])
        else:
            link = urlparse.urljoin(item.url, link)
            if not thumb.startswith("https"):
                thumb = "https:%s" % thumb
        itemlist.append(item.clone(title="%s (%s videos)" % (name, vid_count),
                                   url=link, action="videos", thumbnail=thumb))
    # Pagination
    next_links = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>',
                            re.DOTALL).findall(page)
    if next_links:
        if "go.php?" in next_links[0]:
            next_url = urllib.unquote(next_links[0].split("/go.php?u=")[1].split("&")[0])
        else:
            next_url = urlparse.urljoin(item.url, next_links[0])
        itemlist.append(item.clone(title="Pagina Siguiente", url=next_url))
    return itemlist
def series(item):
    """List curated series/playlists with their counts, plus pagination."""
    logger.info()
    itemlist = []
    page = get_data(item.url)
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)
    serie_re = re.compile(
        '<a onclick=.*?href="([^"]+)".*?\<img src="([^"]+)".*?'
        'h2 itemprop="name">([^<]+).*?p>([^<]+)</p>', re.DOTALL)
    for link, thumb, name, vid_count in serie_re.findall(page):
        itemlist.append(item.clone(title="%s (%s) " % (name, vid_count),
                                   url=urlparse.urljoin(item.url, link),
                                   action="videos", thumbnail=thumb))
    # Pagination
    next_links = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>',
                            re.DOTALL).findall(page)
    if next_links:
        if "go.php?" in next_links[0]:
            next_url = urllib.unquote(next_links[0].split("/go.php?u=")[1].split("&")[0])
        else:
            next_url = urlparse.urljoin(item.url, next_links[0])
        itemlist.append(item.clone(title="Pagina Siguiente", url=next_url))
    return itemlist
def videos(item):
    """List playable video entries with their durations, plus pagination."""
    logger.info()
    itemlist = []
    page = get_data(item.url)
    scene_re = re.compile(
        '<a class="muestra-escena" href="([^"]+)" title="([^"]+)"[^<]+'
        '<img class="thumb" src="([^"]+)".*?<span class="minutos"> '
        '<span class="ico-minutos sprite"></span> ([^<]+)</span>', re.DOTALL)
    for link, name, thumb, length in scene_re.findall(page):
        if "go.php?" in link:
            # Unwrap the proxy's go.php redirector from link and thumbnail.
            link = urllib.unquote(link.split("/go.php?u=")[1].split("&")[0])
            thumb = urllib.unquote(thumb.split("/go.php?u=")[1].split("&")[0])
        else:
            link = urlparse.urljoin("https://www.cumlouder.com", link)
            if not thumb.startswith("https"):
                thumb = "https:%s" % thumb
        itemlist.append(item.clone(title="%s (%s)" % (name, length),
                                   url=urlparse.urljoin(item.url, link),
                                   action="play", thumbnail=thumb,
                                   contentThumbnail=thumb,
                                   contentType="movie", contentTitle=name))
    # Pagination: the "Next" link lives inside the paginador list and may
    # appear with or without rel="nofollow".
    pager = scrapertools.find_single_match(page, '<ul class="paginador"(.*?)</ul>')
    next_links = re.compile('<a href="([^"]+)" rel="nofollow">Next »</a>',
                            re.DOTALL).findall(pager)
    if not next_links:
        next_links = re.compile('<li[^<]+<a href="([^"]+)">Next »</a[^<]+</li>',
                                re.DOTALL).findall(pager)
    if next_links:
        if "go.php?" in next_links[0]:
            next_url = urllib.unquote(next_links[0].split("/go.php?u=")[1].split("&")[0])
        else:
            next_url = urlparse.urljoin(item.url, next_links[0])
        itemlist.append(item.clone(title="Pagina Siguiente", url=next_url))
    return itemlist
def play(item):
    """Resolve the direct stream URL from a video page.

    Fix: the original indexed `findall(data)[0]` unconditionally, so any
    page-layout change crashed with an opaque IndexError; now it logs the
    problem and returns an empty list instead.
    """
    logger.info()
    itemlist = []
    data = get_data(item.url)
    patron = '<source src="([^"]+)" type=\'video/([^\']+)\' label=\'[^\']+\' res=\'([^\']+)\' />'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:
        logger.error("no <source> tag found in %s" % item.url)
        return itemlist
    url, type, res = matches[0]
    if "go.php?" in url:
        # Unwrap the proxy's go.php redirector.
        url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
    elif not url.startswith("http"):
        url = "http:" + url.replace("&amp;", "&")
    itemlist.append(
        Item(channel='cumlouder', action="play", title='Video' + res,
             fulltitle=type.upper() + ' ' + res, url=url,
             server="directo", folder=False))
    return itemlist
def get_data(url_orig):
    # Fetch a page directly; on failure, fall back to the hideproxy.me web
    # proxy and remember the failure in the channel's "url_error" setting so
    # later calls skip straight to the proxy path.
    try:
        # A previous direct fetch already failed this session: go to proxy.
        if config.get_setting("url_error", "cumlouder"):
            raise Exception
        response = httptools.downloadpage(url_orig)
        # Empty body or an "urlopen error [Errno 1]" code is treated as a
        # blocked/broken direct connection.
        if not response.data or "urlopen error [Errno 1]" in str(response.code):
            raise Exception
    except:
        config.set_setting("url_error", True, "cumlouder")
        import random
        # Pick one of the proxy's three exit locations at random.
        server_random = ['nl', 'de', 'us']
        server = server_random[random.randint(0, 2)]
        url = "https://%s.hideproxy.me/includes/process.php?action=update" % server
        post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \
               % (urllib.quote(url_orig), server)
        # Follow the proxy's redirect chain by hand: the first request is a
        # POST, subsequent Location hops are plain GETs (empty post).
        # NOTE(review): loops forever if the proxy keeps redirecting.
        while True:
            response = httptools.downloadpage(url, post, follow_redirects=False)
            if response.headers.get("location"):
                url = response.headers["location"]
                post = ""
            else:
                break
    return response.data

View File

@@ -1,15 +0,0 @@
{
"id": "czechvideo",
"name": "Czechvideo",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://czechvideo.org/templates/Default/images/black75.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

View File

@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from platformcode import config
host = 'http://czechvideo.org'
def mainlist(item):
    """Build the root menu of the czechvideo channel."""
    logger.info()
    menu = [
        ("Ultimos", "lista"),
        ("Categorias", "categorias"),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=host)
                for title, action in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Search entry point: route the query through the /tags/ listing.

    Scraping errors are logged and mapped to an empty result list.
    """
    logger.info()
    item.url = host + "/tags/%s/" % texto.replace(" ", "+")
    try:
        return lista(item)
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
def categorias(item):
    """List category links from the page's category <div>.

    Fix: removed the dead local `thumbnail = urlparse.urljoin(item.url,
    scrapedthumbnail)` — it was built from an always-empty string and
    never used.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<div class="category">(.*?)</ul>')
    patron = '<li><a href="(.*?)".*?>(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle in matches:
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle,
                             url=scrapedurl, thumbnail="", plot=""))
    return itemlist
def lista(item):
    """List video entries (duration + title) and a pagination link."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    story_re = re.compile(
        '<div class="short-story">.*?'
        '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?'
        'div class="short-time">(.*?)</div>', re.DOTALL)
    matches = story_re.findall(data)
    scrapertools.printMatches(matches)
    for link, name, thumb, duration in matches:
        label = "[COLOR yellow]%s[/COLOR] %s" % (duration, name)
        thumb = urlparse.urljoin(item.url, thumb)
        itemlist.append(Item(channel=item.channel, action="play", title=label,
                             url=link, thumbnail=thumb, fanart=thumb, plot=""))
    next_page = scrapertools.find_single_match(
        data, '<del><a href="([^"]+)">Next</a></del>')
    if next_page:
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue",
                                   url=urlparse.urljoin(item.url, next_page)))
    return itemlist
def play(item):
    """Detect embedded server links on the page and tag them with item metadata."""
    logger.info()
    page = httptools.downloadpage(item.url).data
    found = servertools.find_video_items(data=page)
    for entry in found:
        entry.title = item.title
        entry.fulltitle = item.fulltitle
        entry.thumbnail = item.thumbnail
        entry.channel = item.channel
    return found

Some files were not shown because too many files have changed in this diff Show More