Compare commits
4082 Commits
stable
...
nuovo_upda
| Author | SHA1 | Date | |
|---|---|---|---|
| 22b61234a9 | |||
| f129d57f30 | |||
| 607f3c8128 | |||
| 75e0f8bfa0 | |||
| 50954f50ec | |||
| 59ad6be60e | |||
| b22139ba2c | |||
| bde83602d6 | |||
|
|
84a6d7ec93 | ||
|
|
3ffa260525 | ||
|
|
edb24c906f | ||
|
|
8d6ef2aebd | ||
|
|
df6b7c70d8 | ||
|
|
e0cc7d9edb | ||
|
|
e93b58895c | ||
|
|
ae13adb02f | ||
|
|
bcbb42b4c0 | ||
|
|
771d2d571a | ||
|
|
bbdb4375c5 | ||
|
|
88cdae47ff | ||
|
|
f1d055d4fa | ||
|
|
7d641bf83f | ||
|
|
eb14c3c0ac | ||
|
|
bb4e63471e | ||
|
|
73c5b73b64 | ||
|
|
ffffcf691d | ||
|
|
cc8353ddca | ||
|
|
b100f758af | ||
|
|
43b173c7a2 | ||
|
|
2c901fdbf1 | ||
|
|
d378d31677 | ||
|
|
3d25d8d098 | ||
|
|
1818994ded | ||
|
|
da7d766531 | ||
|
|
7d46b74bee | ||
|
|
bc90ba67be | ||
|
|
4bd441bf4d | ||
|
|
d049be14e0 | ||
|
|
5e4cf944b1 | ||
|
|
2a150f7978 | ||
|
|
125e62e763 | ||
|
|
b8a2ea0dc4 | ||
|
|
2f6c6cf453 | ||
|
|
6bc86936b2 | ||
|
|
404c9558eb | ||
|
|
7ce8acd5eb | ||
|
|
2f4edf4d79 | ||
|
|
6c5746a2ad | ||
|
|
521f168ab9 | ||
|
|
26e5fb068e | ||
|
|
895b81edb7 | ||
|
|
412025c514 | ||
|
|
29915fa6e4 | ||
|
|
48fd712f81 | ||
|
|
c8ffa002a3 | ||
|
|
430b7174bf | ||
|
|
251d7f2687 | ||
|
|
c8047932e2 | ||
|
|
f6e2410a2d | ||
|
|
f84527c700 | ||
|
|
f7e467c23a | ||
|
|
1f160b96f9 | ||
|
|
6c08f820e6 | ||
|
|
cc07517afb | ||
|
|
72f850ef3b | ||
|
|
26c8122500 | ||
|
|
1b7fc47781 | ||
|
|
db0802f5bc | ||
|
|
3ce79ced23 | ||
|
|
8f98b6486a | ||
|
|
ef1c9a7736 | ||
|
|
8150bace6c | ||
|
|
987755c058 | ||
|
|
a44c567ad7 | ||
|
|
74c43e5755 | ||
|
|
cc5619d7b2 | ||
|
|
58c402bcc6 | ||
|
|
575629a3ac | ||
|
|
8d72b692a4 | ||
|
|
41e6e87115 | ||
|
|
75b3db40c4 | ||
|
|
28e811b576 | ||
|
|
3f1351e11b | ||
|
|
913b8d7bed | ||
|
|
72de374f9e | ||
|
|
272f0b68e7 | ||
|
|
8b8105adef | ||
|
|
a9992b9ea6 | ||
|
|
1ca9c7031c | ||
|
|
68937da460 | ||
|
|
afe008ea89 | ||
|
|
3a183b82f8 | ||
|
|
a172fe86f3 | ||
|
|
d484a64fb2 | ||
|
|
299327d47f | ||
|
|
bf2e80dbcd | ||
|
|
79efa0a193 | ||
|
|
cea5feb59d | ||
|
|
9d28622ce8 | ||
|
|
be05eb6ec3 | ||
|
|
ae00b017a8 | ||
|
|
b5e2b26f9f | ||
|
|
86768ed294 | ||
|
|
ca3fb3e7ff | ||
|
|
d5c22b2043 | ||
|
|
1c6069eba5 | ||
|
|
d24eced956 | ||
|
|
e3cf5c6e5d | ||
|
|
ca1159e163 | ||
|
|
f1f8330419 | ||
|
|
22159f5f28 | ||
|
|
cd15000d71 | ||
|
|
aa501e2f7d | ||
|
|
9bed2d29c9 | ||
|
|
ec5a56f91e | ||
|
|
2c8dd243a6 | ||
|
|
7e5964d840 | ||
|
|
b9aed889af | ||
|
|
511f95298d | ||
|
|
b6c81c8b4e | ||
|
|
559ae1edd2 | ||
|
|
bdc025b87d | ||
|
|
2acd018503 | ||
|
|
dfff40552d | ||
|
|
09036d4d59 | ||
|
|
32f6781af5 | ||
|
|
b96b63448c | ||
|
|
803b77b9b7 | ||
|
|
6de3ae422c | ||
|
|
349752a380 | ||
|
|
6421ba1229 | ||
|
|
529c6f3669 | ||
|
|
ae34d43429 | ||
|
|
03a332eb75 | ||
|
|
9b25357d46 | ||
|
|
111a5a58d5 | ||
|
|
3e233c0658 | ||
|
|
8d7867d084 | ||
|
|
4328677965 | ||
|
|
018062037b | ||
|
|
1f3eea05ef | ||
|
|
d1d991b887 | ||
|
|
95251821f1 | ||
|
|
44109843af | ||
|
|
3ba2b28ef7 | ||
|
|
f1e7263a07 | ||
|
|
2191660bac | ||
|
|
6ddec976b9 | ||
|
|
a3999d2746 | ||
|
|
2fe75c1470 | ||
|
|
e4edd5872d | ||
|
|
7ab21e01c5 | ||
|
|
c3fa9f22aa | ||
|
|
2a1394767c | ||
|
|
647406ea5b | ||
|
|
43f343555d | ||
|
|
5b04e52af4 | ||
|
|
880b228dee | ||
|
|
f4f0dbfe97 | ||
|
|
d6c0efaba2 | ||
|
|
bbcc6e126e | ||
|
|
280dc6f217 | ||
|
|
7ae3f7e30e | ||
|
|
956526affc | ||
|
|
f44ab32bab | ||
|
|
1ab05e540c | ||
|
|
27d06bf875 | ||
|
|
a415644995 | ||
|
|
3d480a9b15 | ||
|
|
682e9e8b91 | ||
|
|
3b7f277270 | ||
|
|
e14f01ec80 | ||
|
|
08eb178410 | ||
|
|
46c191c725 | ||
|
|
3dbf1ee755 | ||
|
|
7faf8c726a | ||
|
|
604896083c | ||
|
|
afcd204167 | ||
|
|
14b7d8fefa | ||
|
|
4d2968a308 | ||
|
|
074adf7e73 | ||
|
|
95f28f0187 | ||
|
|
5b55e3c9f2 | ||
|
|
036252990f | ||
|
|
9aed9468d7 | ||
|
|
75b8e42fd8 | ||
|
|
29c483618f | ||
|
|
fb74d5ccb3 | ||
|
|
d4bbb01b9c | ||
|
|
14872314b6 | ||
|
|
b38631d19b | ||
|
|
34e0cde285 | ||
|
|
7c09dba22a | ||
|
|
6314317766 | ||
|
|
0d218b592e | ||
|
|
57d2aaadb3 | ||
|
|
c97c70a00e | ||
|
|
141e24e8fa | ||
|
|
c1c2aa6f0c | ||
|
|
b3ebe95278 | ||
|
|
f913513d47 | ||
|
|
03166d3226 | ||
|
|
b2288673dc | ||
|
|
7431f78b58 | ||
|
|
58ca87ce64 | ||
|
|
60807b637b | ||
|
|
93f8386df6 | ||
|
|
96acd612d6 | ||
|
|
8400b15c60 | ||
|
|
e5bb7fbca9 | ||
|
|
c85f0e28be | ||
|
|
68048f1d61 | ||
|
|
d42310a1ac | ||
|
|
1ca2b1e981 | ||
|
|
76550bcebb | ||
|
|
e1ce48e859 | ||
|
|
8cc7cd3ae6 | ||
|
|
db36530e97 | ||
|
|
84b7e9e5f2 | ||
|
|
f66c75a0e0 | ||
|
|
b4b2c4aa43 | ||
|
|
7d92a9bc41 | ||
|
|
a01a097b88 | ||
|
|
18ee7b3c17 | ||
|
|
62d63483d4 | ||
|
|
5d611224c6 | ||
|
|
17d4615015 | ||
|
|
f3127f54f5 | ||
|
|
b7e28501a7 | ||
|
|
58e01dc49d | ||
|
|
769306faa3 | ||
|
|
14660b5aaa | ||
|
|
a7dbe4f287 | ||
|
|
839885c5c7 | ||
|
|
14488c4c54 | ||
|
|
0cfe48cc74 | ||
|
|
9055e2ac02 | ||
|
|
b46cec8041 | ||
|
|
aabaa8d78c | ||
|
|
0333e1262d | ||
|
|
d628d9108d | ||
|
|
66a79e4e21 | ||
|
|
0a25af3aba | ||
|
|
c59bb0da6f | ||
|
|
f65d05b52f | ||
|
|
9af9a564f9 | ||
|
|
a88e915d0f | ||
|
|
2a2e0d8c60 | ||
|
|
36be150ad4 | ||
|
|
a6f9413ee1 | ||
|
|
ff4b428e1b | ||
|
|
e19b5245d8 | ||
|
|
29e27940d5 | ||
|
|
2717037824 | ||
|
|
cfc7ee7855 | ||
|
|
a507fa3143 | ||
|
|
9d0fe1fdf7 | ||
|
|
f6bc183fba | ||
|
|
bd61acb55f | ||
|
|
6bf5f01442 | ||
|
|
d5af2723b7 | ||
|
|
4ab3f80e05 | ||
|
|
4364ae20be | ||
|
|
647abfc5b0 | ||
|
|
f20f223da0 | ||
|
|
5f12bdf049 | ||
|
|
f528dd9556 | ||
|
|
b611c4ca3a | ||
|
|
990017a533 | ||
|
|
1cdc11d84e | ||
|
|
e40145fae2 | ||
|
|
44903392e1 | ||
|
|
89a5513852 | ||
|
|
184d497aec | ||
|
|
b1983b754b | ||
|
|
36665f517b | ||
|
|
43526800bb | ||
|
|
e1c9cd232f | ||
|
|
4263b76650 | ||
|
|
24443db5ee | ||
|
|
fcb1ecfde7 | ||
|
|
49e22dbe3b | ||
|
|
1433ee2a32 | ||
|
|
3b7beb6daf | ||
|
|
09be82638d | ||
|
|
52aadb2352 | ||
|
|
92aa849d16 | ||
|
|
fdfa364943 | ||
|
|
c1832406c3 | ||
|
|
a511eaa355 | ||
|
|
1bedea3214 | ||
|
|
14c77d2d8d | ||
|
|
5ed63ea2ed | ||
|
|
8a048464a4 | ||
|
|
14ce21a646 | ||
|
|
39dbbc655e | ||
|
|
9a821de508 | ||
|
|
14ee433eb5 | ||
|
|
605b74e8f8 | ||
|
|
fc96554986 | ||
|
|
24580fec0b | ||
|
|
c1ce1abcae | ||
|
|
2c294785da | ||
|
|
9fb3b3c261 | ||
|
|
dc04b19ce7 | ||
|
|
c0f14dbe4f | ||
|
|
1dfc394565 | ||
|
|
11b6e8bc75 | ||
|
|
1bf4acac39 | ||
|
|
3e9a5813af | ||
|
|
74de895999 | ||
|
|
a846c17e79 | ||
|
|
5bf69c93b9 | ||
|
|
4fd7b80203 | ||
|
|
b1e46658f0 | ||
|
|
fcf3f8cba3 | ||
|
|
09a5a07923 | ||
|
|
726e524c1f | ||
|
|
0eaada8ba4 | ||
|
|
41e4a9f02d | ||
|
|
295d6560cc | ||
|
|
9ec6f09ca3 | ||
|
|
36042ac2bc | ||
|
|
00c9a72966 | ||
|
|
13a14dd3e5 | ||
|
|
2787c980db | ||
|
|
ef018fce11 | ||
|
|
f18aab9fa8 | ||
|
|
f0ab97a6fd | ||
|
|
043f12afe8 | ||
|
|
f5a6384653 | ||
|
|
8982993835 | ||
|
|
84e22ca681 | ||
|
|
54816418b3 | ||
|
|
8748cd39e9 | ||
|
|
c77e6105ec | ||
|
|
66f562b5f6 | ||
|
|
2f37557f97 | ||
|
|
265f00c20d | ||
|
|
7cacbf1b22 | ||
|
|
7bf554d333 | ||
|
|
e13b8fc202 | ||
|
|
e91e29b95d | ||
|
|
d7ab122d31 | ||
|
|
17f8cab8d7 | ||
|
|
5c56c5e1ec | ||
|
|
bb49bf1405 | ||
|
|
b98ac40d66 | ||
|
|
d44f23497e | ||
|
|
0536c4dfde | ||
|
|
237a8cad77 | ||
|
|
e78e0006b0 | ||
|
|
e94885cedc | ||
|
|
75b2937c41 | ||
|
|
75fd8e04c7 | ||
|
|
9ca822fb7b | ||
|
|
b625462cbc | ||
|
|
09d29418b9 | ||
|
|
340f202c6d | ||
|
|
3fe8478a00 | ||
|
|
4a37dc5207 | ||
|
|
43fc32c5cd | ||
|
|
1f1bf35493 | ||
|
|
2968299354 | ||
|
|
136782b5fc | ||
|
|
5d5080ca7f | ||
|
|
1c322fc604 | ||
|
|
fba9e0f007 | ||
|
|
f78a60d612 | ||
|
|
43d43543b7 | ||
|
|
7e1769bfe5 | ||
|
|
9c07dceab7 | ||
|
|
54bab829b5 | ||
|
|
204ae57e2b | ||
|
|
770e819ad6 | ||
|
|
b2ec377d56 | ||
|
|
e2f195ebd3 | ||
|
|
c94b32da26 | ||
|
|
5ff98c23ce | ||
|
|
707d8c7fd1 | ||
|
|
6abf9d0d09 | ||
|
|
88ccec0b15 | ||
|
|
0a4cba7489 | ||
|
|
b11fb3dec3 | ||
|
|
84951a6801 | ||
|
|
84bc6f6135 | ||
|
|
94e107e17c | ||
|
|
8ae182536c | ||
|
|
f545d74a75 | ||
|
|
b23480664b | ||
|
|
8648014368 | ||
|
|
3da43a418b | ||
|
|
bb9ec6dcdc | ||
|
|
d78abb98ee | ||
|
|
2ef54dd4be | ||
|
|
c30e1311d6 | ||
|
|
61dac80edd | ||
|
|
e059f6e4c6 | ||
|
|
cd7ecd2c48 | ||
|
|
9128f26657 | ||
|
|
183c285b6f | ||
|
|
6a0a46dcff | ||
|
|
3e019573c9 | ||
|
|
22bfc53eaa | ||
|
|
04639802d5 | ||
|
|
46658a2595 | ||
|
|
650489ac46 | ||
|
|
abdfd25011 | ||
|
|
ee837ace97 | ||
|
|
6c43f5101b | ||
|
|
9109d6ec55 | ||
|
|
f6a10e1eda | ||
|
|
1542f6493a | ||
|
|
10f0899e81 | ||
|
|
d5ec41e305 | ||
|
|
0be70c79b1 | ||
|
|
36b8912a18 | ||
|
|
b66d292ff2 | ||
|
|
d723f0552b | ||
|
|
4cfb971475 | ||
|
|
b0f3e4022c | ||
|
|
7e2c5a6b4b | ||
|
|
a832a90dfa | ||
|
|
be782638d1 | ||
|
|
d3e6779ea5 | ||
|
|
c3ba3a1be6 | ||
|
|
7feac8a061 | ||
|
|
e937b70a65 | ||
|
|
51042bf9df | ||
|
|
ef6e3eecb4 | ||
|
|
518d99893a | ||
|
|
56869b48d0 | ||
|
|
4a8b3fd1b6 | ||
|
|
de8d6ff46d | ||
|
|
667f7f31ff | ||
|
|
1102ad2191 | ||
|
|
88c011ec75 | ||
|
|
d83541b5cb | ||
|
|
24c5489d2d | ||
|
|
50372763dc | ||
|
|
a5c265ac40 | ||
|
|
77dc4168ff | ||
|
|
d20384c9cf | ||
|
|
fb80f47bc8 | ||
|
|
f590067983 | ||
|
|
5383448d44 | ||
|
|
9e991c2d49 | ||
|
|
555c0491f2 | ||
|
|
1fda900526 | ||
|
|
17d90cd906 | ||
|
|
d4129b0da2 | ||
|
|
80261ee7ce | ||
|
|
2f5cb8d959 | ||
|
|
1bfc5841f8 | ||
|
|
89e13e71f8 | ||
|
|
2d67edd310 | ||
|
|
352df6dea6 | ||
|
|
636371b658 | ||
|
|
c190f9e3d7 | ||
|
|
812b4a5b08 | ||
|
|
22337380aa | ||
|
|
83aab2a115 | ||
|
|
c9798071f3 | ||
|
|
1cc2790198 | ||
|
|
ed82c22823 | ||
|
|
0042142878 | ||
|
|
422d21d2a0 | ||
|
|
2c4e0330ba | ||
|
|
20b5a52d40 | ||
|
|
d300adbdb4 | ||
|
|
aec6f6811e | ||
|
|
86bff2be1e | ||
|
|
bfff10a459 | ||
|
|
d93ac324ce | ||
|
|
96b2a4dbfd | ||
|
|
fa188aa719 | ||
|
|
65475409b6 | ||
|
|
16d3ad375f | ||
|
|
50b8ac8c08 | ||
|
|
794828696b | ||
|
|
26da64bceb | ||
|
|
20f9048225 | ||
|
|
7d7f191f3c | ||
|
|
89c577dc32 | ||
|
|
52ad47a924 | ||
|
|
bf9e875195 | ||
|
|
eca909a5f8 | ||
|
|
2198e6a8a8 | ||
|
|
005236f52e | ||
|
|
4441f443b4 | ||
|
|
fd4071bc36 | ||
|
|
f7639a7321 | ||
|
|
44808ac042 | ||
|
|
95ff7ddeea | ||
|
|
09df8697ea | ||
|
|
3b746a5fa1 | ||
|
|
7c41d4a16f | ||
|
|
68b126153b | ||
|
|
bff9e65af6 | ||
|
|
f7d661dabf | ||
|
|
336470cc96 | ||
|
|
43da897cf3 | ||
|
|
51da0d223d | ||
|
|
a1236b244d | ||
|
|
a33b455d85 | ||
|
|
bf04f60efd | ||
|
|
0576e101d9 | ||
|
|
47b1dca226 | ||
|
|
f3bd94fd2b | ||
|
|
e0b99a5491 | ||
|
|
5fbf15b83b | ||
|
|
4b71a0d708 | ||
|
|
1401d79879 | ||
|
|
7fc11153fd | ||
|
|
d79e9f358c | ||
|
|
aaab9b867d | ||
|
|
4ac8a7d863 | ||
|
|
ebd3473750 | ||
|
|
074e7b9c79 | ||
|
|
f85f8a94d5 | ||
|
|
71b7f72f13 | ||
|
|
ccf72d517b | ||
|
|
6f9a35ebbc | ||
|
|
7ab66fbb8a | ||
|
|
5484fcc661 | ||
|
|
1a782fd8da | ||
|
|
33ba9fea0d | ||
|
|
4f4d4bc54f | ||
|
|
336e485f60 | ||
|
|
6c3976424a | ||
|
|
72ba8d6efd | ||
|
|
d39194384b | ||
|
|
58287a76ac | ||
|
|
fc924e604d | ||
|
|
1398e96ed6 | ||
|
|
6aabebb232 | ||
|
|
9e3f74f453 | ||
|
|
cc9445ae7c | ||
|
|
61057ded3b | ||
|
|
b3a552e7ef | ||
|
|
7b64e558e7 | ||
|
|
ee5bde91a5 | ||
|
|
e33e46570f | ||
|
|
94912089b7 | ||
|
|
1135c9b96d | ||
|
|
82346899f1 | ||
|
|
49e609b4e0 | ||
|
|
75d83f423c | ||
|
|
6c6d10f35c | ||
|
|
92d009ccd7 | ||
|
|
70e3deaa31 | ||
|
|
4eeb4f7627 | ||
|
|
152633e912 | ||
|
|
1c07e53788 | ||
|
|
ad2804a32c | ||
|
|
247630947f | ||
|
|
a2f03b5f55 | ||
|
|
3f9ecc60eb | ||
|
|
4b5488b445 | ||
|
|
51e19264f4 | ||
|
|
4af7e668cc | ||
|
|
26cd6d4f24 | ||
|
|
2d9413af9d | ||
|
|
f3011b5b5d | ||
|
|
2b8b1d42f2 | ||
|
|
f92894e877 | ||
|
|
0fca66e7f4 | ||
|
|
5cc3290bef | ||
|
|
0e1c544add | ||
|
|
4805152a7d | ||
|
|
4cea872956 | ||
|
|
c7fad20777 | ||
|
|
7de34da0b8 | ||
|
|
7c9da143a9 | ||
|
|
e3544248ee | ||
|
|
87cfef27ea | ||
|
|
8803f1a1dd | ||
|
|
823b234549 | ||
|
|
06ea564dbf | ||
|
|
5923da3325 | ||
|
|
12943c26d9 | ||
|
|
afa381d6ba | ||
|
|
911d4b2a9d | ||
|
|
e8ad9c3c0f | ||
|
|
46302a0f7f | ||
|
|
77e9338661 | ||
|
|
647fdf9987 | ||
|
|
4396e4ce15 | ||
|
|
905e9aeafe | ||
|
|
bbd10b5f76 | ||
|
|
ff5a009167 | ||
|
|
4ef558dd5b | ||
|
|
3b89ecd157 | ||
|
|
fae18dd562 | ||
|
|
a1e14e3f51 | ||
|
|
d02891bb2b | ||
|
|
7eec8ffbc0 | ||
|
|
84b5d77139 | ||
|
|
5c072c8530 | ||
|
|
41ed3c2210 | ||
|
|
88251e9f1a | ||
|
|
cd7df20b9f | ||
|
|
4e8c12ed50 | ||
|
|
49ad5154cf | ||
|
|
9e06b9acb5 | ||
|
|
1e2bd87b7e | ||
|
|
0cf74df051 | ||
|
|
9342bfaa06 | ||
|
|
ff4e30dcd7 | ||
|
|
d3240f4b78 | ||
|
|
69d5088b85 | ||
|
|
566628b774 | ||
|
|
bf8417674e | ||
|
|
c829292cc3 | ||
|
|
0d34139dea | ||
|
|
788cfcff22 | ||
|
|
1d9b5c9dd1 | ||
|
|
30842961d2 | ||
|
|
537668906f | ||
|
|
69fa5e7bf4 | ||
|
|
2de8b6c7e0 | ||
|
|
6324d3124c | ||
|
|
a58129f03c | ||
|
|
d7451c421d | ||
|
|
61b50ec301 | ||
|
|
73247722da | ||
|
|
724e61820e | ||
|
|
cf41316ffc | ||
|
|
613b9abcc6 | ||
|
|
7e7c7e8654 | ||
|
|
3b3902cb29 | ||
|
|
9b65bb4291 | ||
|
|
104c2d4fd4 | ||
|
|
0e915e591a | ||
|
|
8221e09825 | ||
|
|
71cb39ddc4 | ||
|
|
aacdd5d749 | ||
|
|
db39d6bed9 | ||
|
|
187b66fd7c | ||
|
|
c2625c9ba8 | ||
|
|
64fe224029 | ||
|
|
471a7bb74b | ||
|
|
938b1b5c69 | ||
|
|
8bad53bdd5 | ||
|
|
e0eef19c78 | ||
|
|
52abd760a4 | ||
|
|
888956b158 | ||
|
|
e892af34bb | ||
|
|
621725d354 | ||
|
|
caf4b83230 | ||
|
|
872170d935 | ||
|
|
71e29792d0 | ||
|
|
de6d1eb0da | ||
|
|
6cb981d4f1 | ||
|
|
21574b21db | ||
|
|
881f268079 | ||
|
|
29a5b2569e | ||
|
|
a7a94eb20b | ||
|
|
f339fabffa | ||
|
|
adfaa868c8 | ||
|
|
06c7467e4a | ||
|
|
0129828c54 | ||
|
|
542cb33c03 | ||
|
|
c21ae8cf07 | ||
|
|
c02050417d | ||
|
|
edecfb8df4 | ||
|
|
9cc3ab7f52 | ||
|
|
17a33a6ed9 | ||
|
|
682eeddce6 | ||
|
|
0505607b09 | ||
|
|
de8a2388fd | ||
|
|
297603f6cb | ||
|
|
db33ccf61f | ||
|
|
180c2d640a | ||
|
|
ed94dedbc2 | ||
|
|
8ce21e8e10 | ||
|
|
d682cd7886 | ||
|
|
519830bfea | ||
|
|
a6c8a61a0b | ||
|
|
03e8d7018f | ||
|
|
91871c7846 | ||
|
|
92e8a3fcc4 | ||
|
|
791ac16674 | ||
|
|
afd99db33f | ||
|
|
4caad5d27f | ||
|
|
a9ccecdd44 | ||
|
|
38585f5baa | ||
|
|
0dd880537c | ||
|
|
84b30d3be6 | ||
|
|
4a1735fbb4 | ||
|
|
2bfad01bd5 | ||
|
|
3b52812f55 | ||
|
|
c92fbd38bb | ||
|
|
fe109622fa | ||
|
|
a3bfb3b184 | ||
|
|
139a7fe588 | ||
|
|
42b3bbf574 | ||
|
|
867a0bf36b | ||
|
|
27634c3907 | ||
|
|
d8388c44a4 | ||
|
|
dd2314153c | ||
|
|
a47103c056 | ||
|
|
d17b46465c | ||
|
|
a953280d4d | ||
|
|
b4ab8549ff | ||
|
|
0338d248e0 | ||
|
|
3fa58014f6 | ||
|
|
a038497d94 | ||
|
|
3bd5d636e1 | ||
|
|
a588b44c9c | ||
|
|
4a96b8c987 | ||
|
|
83b7e1e821 | ||
|
|
3e88831439 | ||
|
|
3de1441225 | ||
|
|
18e5302741 | ||
|
|
e3939507b8 | ||
|
|
0a6708009e | ||
|
|
b2a390a5c2 | ||
|
|
da4ed8af4c | ||
|
|
05e6e5dea8 | ||
|
|
a2bd4e464d | ||
|
|
d2338d07b2 | ||
|
|
c0b487b280 | ||
|
|
446af0fcfb | ||
|
|
71e4a487db | ||
|
|
4d31b961dd | ||
|
|
d97ca5d5fa | ||
|
|
d27fd3782f | ||
|
|
0c76096a22 | ||
|
|
897a5643a7 | ||
|
|
295304567d | ||
|
|
f82509e60c | ||
|
|
8ec1b818f2 | ||
|
|
3d4893061c | ||
|
|
cb963ec76f | ||
|
|
ad40a34d5c | ||
|
|
35ff445f9b | ||
|
|
2409f7db37 | ||
|
|
d8d1e123ef | ||
|
|
70665bea3c | ||
|
|
59e79739e5 | ||
|
|
1abb62ee01 | ||
|
|
6352e33a5a | ||
|
|
4298468aca | ||
|
|
bc4f4330c1 | ||
|
|
591fb12edf | ||
|
|
5233da7638 | ||
|
|
bc0af7e372 | ||
|
|
bd4cca09ba | ||
|
|
cbccf06cc5 | ||
|
|
92be087cc4 | ||
|
|
9af83f59d9 | ||
|
|
2ce628e367 | ||
|
|
d67ccd9b75 | ||
|
|
94262e9a0c | ||
|
|
377044a879 | ||
|
|
264b2d4292 | ||
|
|
15785a653e | ||
|
|
2dee7d7708 | ||
|
|
e1e817a93c | ||
|
|
547839f0b0 | ||
|
|
944d98a937 | ||
|
|
37e40653a8 | ||
|
|
34cc89f79e | ||
|
|
ddd2933883 | ||
|
|
38eb4ac49f | ||
|
|
80019ab6b7 | ||
|
|
83cd0280ed | ||
|
|
abec23383d | ||
|
|
7450a36530 | ||
|
|
d0baa85fe2 | ||
|
|
08a0e5c0eb | ||
|
|
338526f107 | ||
|
|
b7ba220e18 | ||
|
|
6f294c99bf | ||
|
|
b62c616693 | ||
|
|
7b7f186e47 | ||
|
|
d5c870db44 | ||
|
|
dea6782c5e | ||
|
|
95f3ec68da | ||
|
|
6b546f0dc1 | ||
|
|
52f85673cc | ||
|
|
a151ffbdff | ||
|
|
49764eae1f | ||
|
|
447104a66e | ||
|
|
49f341f6db | ||
|
|
4077f8888b | ||
|
|
802fcc3a13 | ||
|
|
06239735f5 | ||
|
|
5fec26bd6c | ||
|
|
e1d6540fe6 | ||
|
|
6f2b34a857 | ||
|
|
aafb008d5a | ||
|
|
d21ba2dcb7 | ||
|
|
646de5cae4 | ||
|
|
54ad39f1fc | ||
|
|
686d491daf | ||
|
|
70f69acfff | ||
|
|
5e08aa3391 | ||
|
|
35f7a50345 | ||
|
|
f1523e7467 | ||
|
|
6b03659b81 | ||
|
|
07fc2fb724 | ||
|
|
c1efb6db57 | ||
|
|
fdcc00d69e | ||
|
|
f24e8c8da6 | ||
|
|
158372ed1b | ||
|
|
818a423dc9 | ||
|
|
b43520223d | ||
|
|
714ea10b55 | ||
|
|
438b2a5da6 | ||
|
|
05102262ca | ||
|
|
df6f3f1177 | ||
|
|
60de78f448 | ||
|
|
6e4aec0ab9 | ||
|
|
ebbf37222e | ||
|
|
9828908bfe | ||
|
|
925a696a17 | ||
|
|
9d81c2e4b6 | ||
|
|
4610ba71af | ||
|
|
0aebc644fa | ||
|
|
cce2e330ba | ||
|
|
d0de267be7 | ||
|
|
18bb6bc4b2 | ||
|
|
e70c18efa3 | ||
|
|
852588254a | ||
|
|
e8c5f335b3 | ||
|
|
1b61616806 | ||
|
|
ae93e4d1f6 | ||
|
|
9dcd33ac07 | ||
|
|
d6d8153004 | ||
|
|
b33a250f9e | ||
|
|
d09be99136 | ||
|
|
ec4510aea4 | ||
|
|
0276dd46f6 | ||
|
|
40d8501afc | ||
|
|
3f55c94662 | ||
|
|
ebf520aaf2 | ||
|
|
8686308f86 | ||
|
|
d002307088 | ||
|
|
182cca5d5e | ||
|
|
df04d70e38 | ||
|
|
bbc6aef5d2 | ||
|
|
b3015d3ed8 | ||
|
|
9d1390491a | ||
|
|
2265300ebd | ||
|
|
2254d2851a | ||
|
|
6fde4bd5d7 | ||
|
|
fe18e7c6c2 | ||
|
|
d9a4f15340 | ||
|
|
d6dd20ba1a | ||
|
|
1e4e68cb24 | ||
|
|
cbb0d138ab | ||
|
|
9dfc558dc5 | ||
|
|
9c83458d93 | ||
|
|
d8963766c2 | ||
|
|
53fa709767 | ||
|
|
3de393f030 | ||
|
|
6f4c090e7e | ||
|
|
04e121b196 | ||
|
|
7d855652e8 | ||
|
|
0b44d573a9 | ||
|
|
70a5a55e6d | ||
|
|
7270bda78e | ||
|
|
f34269245c | ||
|
|
3defaa16ed | ||
|
|
8315a289b7 | ||
|
|
ac6883c8b2 | ||
|
|
39a1c46b62 | ||
|
|
0274446ebb | ||
|
|
74ed9e24dc | ||
|
|
5b2c76a7de | ||
|
|
06fe40ed9e | ||
|
|
3bc27f3b05 | ||
|
|
253aae6137 | ||
|
|
cdca6bb55f | ||
|
|
e1e36e1a6a | ||
|
|
8c5ea39edf | ||
|
|
133b9e07aa | ||
|
|
bc9e0a09e8 | ||
|
|
ae36c65714 | ||
|
|
3b40059cac | ||
|
|
8877110b80 | ||
|
|
4eed606c30 | ||
|
|
3452861693 | ||
|
|
c1a7544cc5 | ||
|
|
f2fc022a7c | ||
|
|
c0d514c165 | ||
|
|
e926f8f504 | ||
|
|
039117311c | ||
|
|
bd242f4875 | ||
|
|
974a581428 | ||
|
|
9977c2aa3c | ||
|
|
63ba995222 | ||
|
|
42f10a56d3 | ||
|
|
1efe6595e1 | ||
|
|
3ca7707c01 | ||
|
|
a5ba79d176 | ||
|
|
8afa165501 | ||
|
|
6e0cd8ff46 | ||
|
|
1544f32c8c | ||
|
|
999dd188fb | ||
|
|
af14b730f1 | ||
|
|
5229deeaf2 | ||
|
|
2aa492587f | ||
|
|
8c8a5c6f21 | ||
|
|
1bd771b565 | ||
|
|
613087342b | ||
|
|
be6eb18570 | ||
|
|
207df5678d | ||
|
|
1892eab9a7 | ||
|
|
b3a199e457 | ||
|
|
6a93857cc6 | ||
|
|
e5d6d71017 | ||
|
|
40841e6435 | ||
|
|
716625fd34 | ||
|
|
03c4cc53a6 | ||
|
|
a043fc3ab2 | ||
|
|
72300864c7 | ||
|
|
f78ab57f5b | ||
|
|
9d3ca7d4bc | ||
|
|
3c1094ded3 | ||
|
|
23942b3245 | ||
|
|
01d3902fbb | ||
|
|
c18b7fb226 | ||
|
|
bb52f0eb00 | ||
|
|
729bc59e96 | ||
|
|
3954e0f2a2 | ||
|
|
c984813f94 | ||
|
|
e1d2352dc2 | ||
|
|
a782901afc | ||
|
|
b8de2e3df5 | ||
|
|
a0b64c4cdf | ||
|
|
293f9e15c6 | ||
|
|
4350a593d7 | ||
|
|
d50299ca55 | ||
|
|
2d00118252 | ||
|
|
9e44409ab5 | ||
|
|
a286187235 | ||
|
|
87b205c50a | ||
|
|
352cdbaf3d | ||
|
|
2f5c6752bd | ||
|
|
3712d65afd | ||
|
|
6b12a2c893 | ||
|
|
d89d2d2ebb | ||
|
|
f0132c00a3 | ||
|
|
12446fc1b7 | ||
|
|
4f83b6f8be | ||
|
|
48eac3643b | ||
|
|
babe187084 | ||
|
|
7bb4c5b790 | ||
|
|
1e7959439f | ||
|
|
0d30206ffd | ||
|
|
55dfc1b2e4 | ||
|
|
2d6a4abc1c | ||
|
|
ffd69d9067 | ||
|
|
1bdbc18926 | ||
|
|
daae458952 | ||
|
|
917c9f4c84 | ||
|
|
664d1da7b7 | ||
|
|
e38ec3527c | ||
|
|
673147646c | ||
|
|
b29aaea5f8 | ||
|
|
6d2760278c | ||
|
|
1747d75f92 | ||
|
|
4ca25679eb | ||
|
|
dbb44fbbd1 | ||
|
|
be8cfc1948 | ||
|
|
96b9886660 | ||
|
|
4d6d447cba | ||
|
|
958038667e | ||
|
|
f9a5010785 | ||
|
|
b8e75f2dec | ||
|
|
803c374da8 | ||
|
|
0736bc316c | ||
|
|
453cbe4cb5 | ||
|
|
cc58111668 | ||
|
|
9465608423 | ||
|
|
2203a7fef8 | ||
|
|
1f354e359e | ||
|
|
28fbc4ed82 | ||
|
|
789ab9990a | ||
|
|
4647f2f1d6 | ||
|
|
bc2b7320a3 | ||
|
|
24cdd4793c | ||
|
|
12525d5791 | ||
|
|
46f06dea2d | ||
|
|
23bc83cd1b | ||
|
|
934aa0c005 | ||
|
|
fef622860c | ||
|
|
4838be5705 | ||
|
|
6fc76ddcae | ||
|
|
eee60e9187 | ||
|
|
c42bc9c0eb | ||
|
|
c71a0a8f2f | ||
|
|
db444c2eb2 | ||
|
|
f184ce97fd | ||
|
|
57076cc62f | ||
|
|
605ca3affe | ||
|
|
876e66dcf8 | ||
|
|
48f38e7ee3 | ||
|
|
803400a693 | ||
|
|
01577f9c89 | ||
|
|
b81d66bd81 | ||
|
|
a30da726c5 | ||
|
|
993ac5ab16 | ||
|
|
8d6d6a41cf | ||
|
|
8b051e49f2 | ||
|
|
9f1b65613a | ||
|
|
3321a8a119 | ||
|
|
25a97a3f11 | ||
|
|
3950ad91ef | ||
|
|
d3bde5dd51 | ||
|
|
2c852fd826 | ||
|
|
e7d5a98c4e | ||
|
|
30e4fa0261 | ||
|
|
79781333ef | ||
|
|
d7918418a2 | ||
|
|
a810a2fe4d | ||
|
|
0b52bd671a | ||
|
|
4c82dbdc2d | ||
|
|
846ac879fd | ||
|
|
4f322793b1 | ||
|
|
e531962cd9 | ||
|
|
6cca77bcf2 | ||
|
|
79198b9f0e | ||
|
|
65c63fdf87 | ||
|
|
8e91250913 | ||
|
|
07e7c550ae | ||
|
|
32ce20c630 | ||
|
|
3f08dc88e1 | ||
|
|
333cb14dfa | ||
|
|
b561d3708f | ||
|
|
e728947a6e | ||
|
|
1b2782a976 | ||
|
|
62fd35e9e2 | ||
|
|
8b7450a343 | ||
|
|
514ee756e1 | ||
|
|
d53906a30a | ||
|
|
86a04bc81c | ||
|
|
0e42a8ecb6 | ||
|
|
5b144ac71b | ||
|
|
a9746da11e | ||
|
|
68557aed96 | ||
|
|
abaff93e9f | ||
|
|
da312433f6 | ||
|
|
befa59f160 | ||
|
|
0634538f8d | ||
|
|
5e4a6d9e28 | ||
|
|
1017c661d2 | ||
|
|
74e4652d5b | ||
|
|
cbe69ac2e3 | ||
|
|
4ef89cdda8 | ||
|
|
7c197f89f1 | ||
|
|
415fcea517 | ||
|
|
9f595ef409 | ||
|
|
18720adc37 | ||
|
|
17eea720bc | ||
|
|
5be564b3e9 | ||
|
|
e9c561318e | ||
|
|
f32088bf50 | ||
|
|
955cb201a3 | ||
|
|
7b3ebf687a | ||
|
|
d4cf099e95 | ||
|
|
d87d7a17e0 | ||
|
|
ffa7749116 | ||
|
|
6524a3af02 | ||
|
|
eafb27d99c | ||
|
|
65500b10d4 | ||
|
|
a11203ec66 | ||
|
|
80149175d5 | ||
|
|
7a60974005 | ||
|
|
c9a455d4de | ||
|
|
cfeba7b2a6 | ||
|
|
be75cd2e1a | ||
|
|
2ccb90d480 | ||
|
|
41b2731f5b | ||
|
|
45f26036a1 | ||
|
|
45680c8ce4 | ||
|
|
9f41be6da0 | ||
|
|
4e0174a36a | ||
|
|
4439278232 | ||
|
|
81d9ecb41f | ||
|
|
964cc80cce | ||
|
|
8e020bb605 | ||
|
|
cf125d1e2b | ||
|
|
29a4769bcc | ||
|
|
a62841c1b3 | ||
|
|
3048b49140 | ||
|
|
39a2a4f64b | ||
|
|
a95aa5f816 | ||
|
|
3286638d37 | ||
|
|
2816a08801 | ||
|
|
e4755fd44c | ||
|
|
43f2509a56 | ||
|
|
9d7d0efde9 | ||
|
|
aa55d2e37d | ||
|
|
43b385d24d | ||
|
|
b54aa3ef85 | ||
|
|
db2a4cea91 | ||
|
|
a00641ab7a | ||
|
|
c8517dfa6d | ||
|
|
e28e59d5ae | ||
|
|
c5474c839b | ||
|
|
1e7f7d9407 | ||
|
|
986d6e63ea | ||
|
|
10939c5d7b | ||
|
|
96dcbae05d | ||
|
|
fc8b8652ca | ||
|
|
802e316539 | ||
|
|
d32349d33b | ||
|
|
a561e67735 | ||
|
|
53807e3c3c | ||
|
|
1393e46eb2 | ||
|
|
0d06c13d4e | ||
|
|
1856fb9ea5 | ||
|
|
8bab7f5b93 | ||
|
|
a77f659593 | ||
|
|
4b24d7e794 | ||
|
|
78c354ac71 | ||
|
|
5e29d761e1 | ||
|
|
d2e9b98c69 | ||
|
|
413cf4576b | ||
|
|
c8a3a8496a | ||
|
|
57191e6891 | ||
|
|
e4f9a5f9c9 | ||
|
|
1194f90b94 | ||
|
|
3f05d31c56 | ||
|
|
0532391a9d | ||
|
|
01235a9b97 | ||
|
|
49c03638df | ||
|
|
c21b2166f5 | ||
|
|
7853ad41b9 | ||
|
|
f417d643f3 | ||
|
|
0ea5f1ca51 | ||
|
|
1ff475354a | ||
|
|
e530ff7215 | ||
|
|
df541c3178 | ||
|
|
d31801fd67 | ||
|
|
b431109fcb | ||
|
|
8ede1efe6f | ||
|
|
a74a5eb5dc | ||
|
|
3256b0d8e8 | ||
|
|
cc3eb24317 | ||
|
|
87541fb1e4 | ||
|
|
f8647df9e0 | ||
|
|
bb4ccff339 | ||
|
|
3f72cebd96 | ||
|
|
5dfd4a09c2 | ||
|
|
c48e15d55d | ||
|
|
9b70993ad6 | ||
|
|
51347e597b | ||
|
|
0f24cbdfe9 | ||
|
|
f278b93314 | ||
|
|
323ef6631c | ||
|
|
8006301d72 | ||
|
|
642c0acc41 | ||
|
|
8d923b8af8 | ||
|
|
90d55a5a22 | ||
|
|
5685b106be | ||
|
|
ec752459da | ||
|
|
29049e0628 | ||
|
|
a1cc62bb24 | ||
|
|
a2b148684d | ||
|
|
1614191d70 | ||
|
|
a52d24edec | ||
|
|
d008d3922d | ||
|
|
e43cab06e0 | ||
|
|
813b71e085 | ||
|
|
4d9eab3204 | ||
|
|
a2b36c4a5c | ||
|
|
92c8841999 | ||
|
|
3032581564 | ||
|
|
a3a5651692 | ||
|
|
5988776120 | ||
|
|
0eae151584 | ||
|
|
0380382bed | ||
|
|
aa4b352349 | ||
|
|
ea75601601 | ||
|
|
d876eadea9 | ||
|
|
2ae2288452 | ||
|
|
93429cacd9 | ||
|
|
8077af637c | ||
|
|
536bf24abc | ||
|
|
dcf9248483 | ||
|
|
2fdc231967 | ||
|
|
b933646c12 | ||
|
|
f94be667ad | ||
|
|
7bc7aff1ac | ||
|
|
1312c498f2 | ||
|
|
6f6a13809a | ||
|
|
2f38a52ec9 | ||
|
|
b4832cf599 | ||
|
|
2832470acb | ||
|
|
28a7613d4f | ||
|
|
050291c701 | ||
|
|
95bd39c887 | ||
|
|
12673d5d19 | ||
|
|
9f2ede3c64 | ||
|
|
b791532691 | ||
|
|
2d3a1722a7 | ||
|
|
ff5f40b966 | ||
|
|
a1ad38d2ea | ||
|
|
5833b38f3b | ||
|
|
4308943816 | ||
|
|
34d667ffd7 | ||
|
|
7f4928bcd8 | ||
|
|
c54790b2c4 | ||
|
|
2552f6f846 | ||
|
|
4e75232460 | ||
|
|
5381a435b5 | ||
|
|
3b9a850b09 | ||
|
|
91c9089b31 | ||
|
|
43cc48174d | ||
|
|
7bb6f10ddb | ||
|
|
ff93ee6389 | ||
|
|
61d3d5cf54 | ||
|
|
2dbd7eee00 | ||
|
|
84f5a82070 | ||
|
|
c714734141 | ||
|
|
4151b88d7a | ||
|
|
81438ffa07 | ||
|
|
46c6feb086 | ||
|
|
ac1c95d537 | ||
|
|
52c8db7acb | ||
|
|
5ccf65b418 | ||
|
|
693cbc6efd | ||
|
|
eafae197a2 | ||
|
|
bfb9e7ecd3 | ||
|
|
8ce63d4e46 | ||
|
|
67a3335346 | ||
|
|
722c7b63a9 | ||
|
|
a3ea3e0ee2 | ||
|
|
9b54df6528 | ||
|
|
931f345e35 | ||
|
|
1e55f25b05 | ||
|
|
7fa8212fed | ||
|
|
7b9ac44128 | ||
|
|
b76f454c25 | ||
|
|
fd06c066f3 | ||
|
|
e1fb73139a | ||
|
|
2ef9f08bd7 | ||
|
|
00ee4fa0e9 | ||
|
|
5336a7e448 | ||
|
|
f458341d26 | ||
|
|
9d3fde88de | ||
|
|
788b7be621 | ||
|
|
8d9c45be9b | ||
|
|
768bf40c05 | ||
|
|
6424e3881c | ||
|
|
c054389a9d | ||
|
|
05424ee827 | ||
|
|
f514c3899b | ||
|
|
ae13866937 | ||
|
|
c8543e95f0 | ||
|
|
93b4174017 | ||
|
|
b9566f9cb9 | ||
|
|
a4a700b9ac | ||
|
|
b22df3e973 | ||
|
|
2850e56813 | ||
|
|
6791954287 | ||
|
|
8b6f6bf2c8 | ||
|
|
643b0bbde8 | ||
|
|
20941d820b | ||
|
|
36ae7ea078 | ||
|
|
3efb36878f | ||
|
|
1bdb9809b3 | ||
|
|
87adb67877 | ||
|
|
58084388e6 | ||
|
|
b5ae37855a | ||
|
|
6fafe0b079 | ||
|
|
1eb2ebd00d | ||
|
|
5f58a796bb | ||
|
|
6031a94a7a | ||
|
|
3be9a8f3a6 | ||
|
|
1a144a21b0 | ||
|
|
57469a1b8d | ||
|
|
fae2d805c4 | ||
|
|
27f9311a9e | ||
|
|
59aceeaeb7 | ||
|
|
b0079b4d77 | ||
|
|
3cbfb92eb3 | ||
|
|
a0e4ecb6cb | ||
|
|
d75e18b5ba | ||
|
|
76a2637aa6 | ||
|
|
88f4d57039 | ||
|
|
110adc3eba | ||
|
|
66d5ef1d5c | ||
|
|
6a9d1e9bea | ||
|
|
9881e04427 | ||
|
|
6761012a02 | ||
|
|
b6a6fdd42a | ||
|
|
bc9540064a | ||
|
|
bf70737555 | ||
|
|
9f231db4d1 | ||
|
|
92dc09f370 | ||
|
|
3c90065624 | ||
|
|
f7d0afc041 | ||
|
|
a28888b032 | ||
|
|
7aaed87dfe | ||
|
|
1d42a2ec02 | ||
|
|
18bd2b1ae7 | ||
|
|
6b2a35175e | ||
|
|
ff04ad4285 | ||
|
|
4ee336aae9 | ||
|
|
485691e6fd | ||
|
|
dce5d75b99 | ||
|
|
eeeacffed5 | ||
|
|
e2cc66211a | ||
|
|
2403789a8d | ||
|
|
bf8a096d18 | ||
|
|
8d55760990 | ||
|
|
5f786c3eb4 | ||
|
|
c2d1228665 | ||
|
|
2251d3030e | ||
|
|
4a64642ad0 | ||
|
|
cac6ff221b | ||
|
|
1392989a57 | ||
|
|
8c7b504627 | ||
|
|
f005e80cce | ||
|
|
1c6749c809 | ||
|
|
f4cc9fec05 | ||
|
|
561aaa3b2e | ||
|
|
61fa12e7a6 | ||
|
|
869929e2a3 | ||
|
|
17226d63f3 | ||
|
|
638c6f6de0 | ||
|
|
55c5f53ff1 | ||
|
|
adf5425a09 | ||
|
|
fe73fb92e5 | ||
|
|
1b26b5fac2 | ||
|
|
421f145a94 | ||
|
|
87694b51ab | ||
|
|
61d7f86ea1 | ||
|
|
bf2cd59db7 | ||
|
|
f9dcedfd83 | ||
|
|
779d916332 | ||
|
|
620c952d7e | ||
|
|
5070e32d65 | ||
|
|
c9127de030 | ||
|
|
34281e14f4 | ||
|
|
fe221566ff | ||
|
|
e7b4feae5e | ||
|
|
7a897caa2d | ||
|
|
83d1914a2a | ||
|
|
cd9e55da87 | ||
|
|
5db2172c09 | ||
|
|
e4d5311288 | ||
|
|
bb202f3dd4 | ||
|
|
6b909cfefc | ||
|
|
f296f402d6 | ||
|
|
ce42f40a24 | ||
|
|
ec8ab9db35 | ||
|
|
efa07dc581 | ||
|
|
503fe75d26 | ||
|
|
a171a9dbee | ||
|
|
077e0b2f37 | ||
|
|
383727b22b | ||
|
|
22cac8cc62 | ||
|
|
7e646f286d | ||
|
|
ea9b576fc5 | ||
|
|
843f62bcfd | ||
|
|
d3787e7dc7 | ||
|
|
463212d041 | ||
|
|
934f10682d | ||
|
|
6cda61385d | ||
|
|
0d78d6ee95 | ||
|
|
2a68a4901d | ||
|
|
68a5917e47 | ||
|
|
0401959df2 | ||
|
|
dbfc9fc341 | ||
|
|
1685856bd9 | ||
|
|
359a444e1e | ||
|
|
3914c42187 | ||
|
|
65f2093136 | ||
|
|
d123d53742 | ||
|
|
b2fb8a0a67 | ||
|
|
9908ffe697 | ||
|
|
babb973087 | ||
|
|
15caff7cd2 | ||
|
|
b13784c5af | ||
|
|
34c74849b0 | ||
|
|
bad099605c | ||
|
|
02931ef2a1 | ||
|
|
bc4c71d19c | ||
|
|
492536fcbd | ||
|
|
4513ad3282 | ||
|
|
7dde92eb43 | ||
|
|
bf147f7cdf | ||
|
|
0527ec2396 | ||
|
|
345cb3a626 | ||
|
|
420d070ca0 | ||
|
|
e40bdc341a | ||
|
|
c5d95a1013 | ||
|
|
54b56e2556 | ||
|
|
f49ed10102 | ||
|
|
d90dde306e | ||
|
|
72192899d1 | ||
|
|
8068aa42ef | ||
|
|
9244201ac1 | ||
|
|
b780f4dee7 | ||
|
|
b83cd14ecd | ||
|
|
d87deb9c3a | ||
|
|
e14abc1315 | ||
|
|
cf31c655a8 | ||
|
|
a5400226cd | ||
|
|
953138233d | ||
|
|
918fd2b20b | ||
|
|
fa52783f1b | ||
|
|
6cefae53f6 | ||
|
|
2d7a950362 | ||
|
|
370a889122 | ||
|
|
604ed63f11 | ||
|
|
41be7d15d0 | ||
|
|
24fa4dd0d2 | ||
|
|
59041eed4d | ||
|
|
2c66ec0ec0 | ||
|
|
072f1692a1 | ||
|
|
169ea9a8c4 | ||
|
|
87ed8e874a | ||
|
|
a9dac9c65b | ||
|
|
c28154608b | ||
|
|
4bcbec4253 | ||
|
|
35def5e512 | ||
|
|
e74a868799 | ||
|
|
aa54ad8755 | ||
|
|
81a9787d06 | ||
|
|
a180136f8c | ||
|
|
59ba64a5e0 | ||
|
|
d9ed79c552 | ||
|
|
45ca7943d3 | ||
|
|
749e635911 | ||
|
|
e2840b00b9 | ||
|
|
327290efdb | ||
|
|
46af20912b | ||
|
|
cc61259e88 | ||
|
|
7588c1a9cf | ||
|
|
3ecf89732e | ||
|
|
0f921975bf | ||
|
|
6fc7d73cf6 | ||
|
|
c51861e4a4 | ||
|
|
b8ba6f9bba | ||
|
|
9c8f33f7cf | ||
|
|
d1bf89a4c3 | ||
|
|
709cb616e7 | ||
|
|
ddf75b8192 | ||
|
|
0952f63584 | ||
|
|
29c98b48ea | ||
|
|
213a573d10 | ||
|
|
5ad6f2d228 | ||
|
|
433b28b3a0 | ||
|
|
ce9e41171b | ||
|
|
3e037fcdb2 | ||
|
|
8a13267d75 | ||
|
|
15f96768c4 | ||
|
|
5cc9f96b4b | ||
|
|
b8b7d26542 | ||
|
|
3baf7422b7 | ||
|
|
c07558e545 | ||
|
|
e62f9fd51d | ||
|
|
3ecc17db98 | ||
|
|
a9c6dac6f4 | ||
|
|
058b3ed507 | ||
|
|
d0f022070f | ||
|
|
4982a1021f | ||
|
|
f1ef16661b | ||
|
|
634b9b3b84 | ||
|
|
b0ac43c5e3 | ||
|
|
abe07810d2 | ||
|
|
744ce34473 | ||
|
|
5c62b0186c | ||
|
|
7de181cf2d | ||
|
|
7238c011d2 | ||
|
|
3929bf4140 | ||
|
|
3e4141151f | ||
|
|
feaf9fb826 | ||
|
|
e0fcc9c11c | ||
|
|
11557ed5e4 | ||
|
|
9ee4c95453 | ||
|
|
154dfa469f | ||
|
|
2c98ce45cf | ||
|
|
0fe1fb9d11 | ||
|
|
5b7203a9cd | ||
|
|
59a7f14b75 | ||
|
|
d01ef7bc55 | ||
|
|
3975e1e9c5 | ||
|
|
0ecca0ba46 | ||
|
|
2ba7af7d03 | ||
|
|
bb3ed403bd | ||
|
|
a9ff01be70 | ||
|
|
37403795ff | ||
|
|
a7125e158a | ||
|
|
4b926ff4b3 | ||
|
|
9e83b9aaa9 | ||
|
|
bb767470a8 | ||
|
|
95c0688c63 | ||
|
|
527cfa86d7 | ||
|
|
43726402a7 | ||
|
|
37a42e485d | ||
|
|
585aebec95 | ||
|
|
6f95182718 | ||
|
|
3cbbee1dcd | ||
|
|
6447b0ac4e | ||
|
|
af0a83b5cf | ||
|
|
342d36fa78 | ||
|
|
30bde135b2 | ||
|
|
b9c059f84e | ||
|
|
51b138dd88 | ||
|
|
03a5253e0e | ||
|
|
2c84897674 | ||
|
|
d38b11667e | ||
|
|
7ffb74c0e1 | ||
|
|
1e1327da90 | ||
|
|
bce00f441b | ||
|
|
3ef5876ab9 | ||
|
|
ca974293bc | ||
|
|
9c951f918f | ||
|
|
245196fa7d | ||
|
|
f878c1f224 | ||
|
|
4439679d15 | ||
|
|
20404c1c5b | ||
|
|
7a7b914585 | ||
|
|
d2ca99c7fc | ||
|
|
f291e42832 | ||
|
|
964a754b09 | ||
|
|
cfef5b0198 | ||
|
|
4f0aaf3969 | ||
|
|
7998362b0b | ||
|
|
fd0e476cda | ||
|
|
d702648ef4 | ||
|
|
ddb5f02bc1 | ||
|
|
6f97d167db | ||
|
|
f0dc793112 | ||
|
|
5740220340 | ||
|
|
1bf332262a | ||
|
|
2e8e241ad8 | ||
|
|
3fe3fe4438 | ||
|
|
95ab9c6953 | ||
|
|
6ab09686ce | ||
|
|
8f3440cce6 | ||
|
|
cf18ee92d8 | ||
|
|
eb4242e091 | ||
|
|
89b1893b30 | ||
|
|
639ddefbe1 | ||
|
|
89f1684693 | ||
|
|
113a1dc7df | ||
|
|
96d3692522 | ||
|
|
794f7d6d79 | ||
|
|
8eae130850 | ||
|
|
8a28fb3499 | ||
|
|
089cec0ea6 | ||
|
|
207aabbc79 | ||
|
|
742c3307d0 | ||
|
|
a1f0609fc8 | ||
|
|
ed3d959d5a | ||
|
|
02e19a962a | ||
|
|
5a679b6fa8 | ||
|
|
7b1ba2d4f2 | ||
|
|
bee4b3fcff | ||
|
|
c353686eaa | ||
|
|
ecb8fc910b | ||
|
|
09cc856460 | ||
|
|
4a5dca826e | ||
|
|
0075f61d6c | ||
|
|
3ad48eb38c | ||
|
|
4d49b74c26 | ||
|
|
ba30ce20ba | ||
|
|
2ffe3d5fbd | ||
|
|
eb460fe44e | ||
|
|
0872713436 | ||
|
|
2f2957c5f6 | ||
|
|
180400d487 | ||
|
|
494393d216 | ||
|
|
f807938f35 | ||
|
|
7f8c9f8db4 | ||
|
|
391bce3905 | ||
|
|
c97e1744e1 | ||
|
|
ccd4cb22c8 | ||
|
|
7932613f76 | ||
|
|
d6cd5ccf76 | ||
|
|
f002201565 | ||
|
|
4944f3d9b8 | ||
|
|
375db0d2a5 | ||
|
|
6c5af264c3 | ||
|
|
195f207dd4 | ||
|
|
8f0afb0bb5 | ||
|
|
4bae35e072 | ||
|
|
36d2d6b355 | ||
|
|
18bf5e998e | ||
|
|
91a8800a92 | ||
|
|
6468e1898c | ||
|
|
c5e5dc9b1c | ||
|
|
9e58ed9e50 | ||
|
|
f5e384a554 | ||
|
|
b489ee97b7 | ||
|
|
7b6bc11486 | ||
|
|
91e33053fd | ||
|
|
8ec243a0af | ||
|
|
7930b95211 | ||
|
|
a8c72db863 | ||
|
|
7a1404628d | ||
|
|
b492a2bd71 | ||
|
|
8cbb109c01 | ||
|
|
1a61aa5c1b | ||
|
|
59c1e66b28 | ||
|
|
6e8dd1c551 | ||
|
|
c93fd73f77 | ||
|
|
bba0a43573 | ||
|
|
8cf472d5b1 | ||
|
|
3bfbb38214 | ||
|
|
17bced601f | ||
|
|
a997a14ec8 | ||
|
|
93edf5deab | ||
|
|
a30d765713 | ||
|
|
1de58db75b | ||
|
|
dda4d4bbc6 | ||
|
|
c45a0017e9 | ||
|
|
6d5d4f6046 | ||
|
|
528218aed3 | ||
|
|
44b5cfac27 | ||
|
|
4fba90059f | ||
|
|
86adb7ee8e | ||
|
|
ddfd86f48e | ||
|
|
6f19d3263d | ||
|
|
67ffce6ede | ||
|
|
61ee2dc1c6 | ||
|
|
74ae96f97f | ||
|
|
e33c6d836d | ||
|
|
da94fe501e | ||
|
|
3e599cfd62 | ||
|
|
e755d71127 | ||
|
|
483fab34df | ||
|
|
4f2e889b23 | ||
|
|
7a2b2bd843 | ||
|
|
c563b0e089 | ||
|
|
e71718a047 | ||
|
|
8184ecf1ce | ||
|
|
866f7aac81 | ||
|
|
73c7fb6b6d | ||
|
|
6ad913ddea | ||
|
|
0a870f2e04 | ||
|
|
47704ba79e | ||
|
|
8c2ded3fc1 | ||
|
|
74c380fa64 | ||
|
|
d169a8c193 | ||
|
|
da1a2b6837 | ||
|
|
5f09acd7c2 | ||
|
|
e80834e110 | ||
|
|
039a82945a | ||
|
|
f5bbd3b78b | ||
|
|
012b7ba940 | ||
|
|
75427b5ee1 | ||
|
|
dec79f130c | ||
|
|
19d22c3c41 | ||
|
|
b2d3429eca | ||
|
|
ccd60185ab | ||
|
|
2acd828874 | ||
|
|
98eacce893 | ||
|
|
8fa69efc71 | ||
|
|
3ffb7498f4 | ||
|
|
923beb5bba | ||
|
|
e55da824fc | ||
|
|
43f850fbdb | ||
|
|
896a7ea045 | ||
|
|
aeba476572 | ||
|
|
fce5633602 | ||
|
|
c17cc9be72 | ||
|
|
2489e8ed6c | ||
|
|
d8592bc0ba | ||
|
|
3d45a725f2 | ||
|
|
a7977ef57c | ||
|
|
da793f2e31 | ||
|
|
34adfe11b9 | ||
|
|
7e49da0998 | ||
|
|
e4aaeca3c8 | ||
|
|
e4065078dd | ||
|
|
e9eb4b38e0 | ||
|
|
009b34fb2e | ||
|
|
20afab27e9 | ||
|
|
ae5d33e47a | ||
|
|
a8f24f5840 | ||
|
|
9416857884 | ||
|
|
bb32d975eb | ||
|
|
c260b658cc | ||
|
|
a9be1f60af | ||
|
|
c90ac0e936 | ||
|
|
de648659b2 | ||
|
|
1b3b67c4dd | ||
|
|
b07dc69a90 | ||
|
|
c55a3f444f | ||
|
|
29d233afca | ||
|
|
887515db61 | ||
|
|
247b8c618c | ||
|
|
d729ef4fa9 | ||
|
|
ab80b4e8ce | ||
|
|
5f45c31e82 | ||
|
|
bdd9ae1d94 | ||
|
|
402a56a0ed | ||
|
|
9f8eef846a | ||
|
|
13f081ae90 | ||
|
|
ffb2857031 | ||
|
|
f7feeb515c | ||
|
|
531ba2d65b | ||
|
|
9a6f7d55be | ||
|
|
16f6aaa459 | ||
|
|
395f695c23 | ||
|
|
26a8ff7a8c | ||
|
|
5c2302eb4a | ||
|
|
91b6cf6691 | ||
|
|
36dcb2bf01 | ||
|
|
2d73b5adb2 | ||
|
|
2951129936 | ||
|
|
38f8ed6487 | ||
|
|
8bc3464ef5 | ||
|
|
601962df83 | ||
|
|
8c07ff95a7 | ||
|
|
0d8af39a0c | ||
|
|
702d89ff51 | ||
|
|
030ea82231 | ||
|
|
f7d2a83cdb | ||
|
|
ce4a810804 | ||
|
|
00a4195da8 | ||
|
|
465b68638c | ||
|
|
d877f98317 | ||
|
|
cf00c2a410 | ||
|
|
50eacd96f9 | ||
|
|
31f5c7e564 | ||
|
|
da62ee547d | ||
|
|
3d22cbbaa6 | ||
|
|
be23cf8cce | ||
|
|
49224723fc | ||
|
|
0afc888490 | ||
|
|
8395c0abeb | ||
|
|
a923745572 | ||
|
|
c2fd125268 | ||
|
|
2206265250 | ||
|
|
22c1ab4d16 | ||
|
|
a50c27f41b | ||
|
|
6b858ed281 | ||
|
|
9a37482b5e | ||
|
|
d2506cb078 | ||
|
|
c126219b03 | ||
|
|
8bd5d06ce8 | ||
|
|
c3d20372ab | ||
|
|
d1cdd98ac0 | ||
|
|
c60fe0cb97 | ||
|
|
9347c0dbbe | ||
|
|
0f4c1c7de1 | ||
|
|
29b1cc1dff | ||
|
|
660662669b | ||
|
|
a3d8362d13 | ||
|
|
e18b48516f | ||
|
|
ac40d94d9f | ||
|
|
e4607f5a74 | ||
|
|
40c201f4fc | ||
|
|
d57af7d738 | ||
|
|
0670dcee49 | ||
|
|
568cb30d7b | ||
|
|
319c639965 | ||
|
|
049281991b | ||
|
|
560f46946c | ||
|
|
536464fc5b | ||
|
|
f0c93e778a | ||
|
|
46995fe32a | ||
|
|
b477c070f3 | ||
|
|
84c4523d1f | ||
|
|
d2bdca72f8 | ||
|
|
88f23ef43d | ||
|
|
c5c1bb2221 | ||
|
|
3a86eb22c7 | ||
|
|
08ee223497 | ||
|
|
c01e299d16 | ||
|
|
ba380910bf | ||
|
|
6e290a8118 | ||
|
|
d3efc81e8b | ||
|
|
1ea7b8b016 | ||
|
|
afcdc79531 | ||
|
|
94b8f21373 | ||
|
|
a91d98185f | ||
|
|
22f4e80783 | ||
|
|
288af804e7 | ||
|
|
c9772d1c2a | ||
|
|
f9baa3306e | ||
|
|
978344a01f | ||
|
|
94f5b13012 | ||
|
|
34ccc1e905 | ||
|
|
055e09f033 | ||
|
|
4ca90ecd9a | ||
|
|
b2b62a4d36 | ||
|
|
de7dcf686b | ||
|
|
5acd3addc6 | ||
|
|
fbdc50bc86 | ||
|
|
f779782cdc | ||
|
|
ee11b037ab | ||
|
|
872cad2903 | ||
|
|
a815b2a877 | ||
|
|
4f498a05f8 | ||
|
|
9f11eac225 | ||
|
|
ee79bf219d | ||
|
|
96066d0ec2 | ||
|
|
7b1a7264e2 | ||
|
|
43a7cbb227 | ||
|
|
123d540bc6 | ||
|
|
125458af20 | ||
|
|
0adc78564c | ||
|
|
9df29b6113 | ||
|
|
096f8dbfe1 | ||
|
|
9b6384e14c | ||
|
|
b180ea9a23 | ||
|
|
43504da41d | ||
|
|
005d11bb38 | ||
|
|
20c9509104 | ||
|
|
c90953138e | ||
|
|
5363031f4e | ||
|
|
271944b006 | ||
|
|
9ee5a50e4f | ||
|
|
7ffa90f92b | ||
|
|
8459bd6dc7 | ||
|
|
952a862d8c | ||
|
|
71215dcba2 | ||
|
|
e974ab740f | ||
|
|
131e7f55f7 | ||
|
|
a4ba6542c9 | ||
|
|
396dfb59ea | ||
|
|
3e80e31c9c | ||
|
|
e119041ef6 | ||
|
|
efe9e0664a | ||
|
|
e8c06af550 | ||
|
|
f88ca6e560 | ||
|
|
1501b4fdda | ||
|
|
87b960007a | ||
|
|
bc5b29dfdf | ||
|
|
d2c23f8b6d | ||
|
|
91e33dbd68 | ||
|
|
1b427788e5 | ||
|
|
4f224de759 | ||
|
|
911ec9a3bf | ||
|
|
19240c6ab4 | ||
|
|
d90062f6d3 | ||
|
|
da471c5f6e | ||
|
|
549ca38125 | ||
|
|
84452a962b | ||
|
|
bd321ec073 | ||
|
|
47d2144b0a | ||
|
|
f35db58f70 | ||
|
|
630bc93614 | ||
|
|
711ad239af | ||
|
|
f17c2cd5a6 | ||
|
|
9bd8764805 | ||
|
|
8caad5f96c | ||
|
|
d21deca5fa | ||
|
|
c70aabdb33 | ||
|
|
986ad678e7 | ||
|
|
11c943f917 | ||
|
|
58c19eb210 | ||
|
|
81dd318e3d | ||
|
|
79b3f56dad | ||
|
|
fc78b3a6a4 | ||
|
|
b022109f9e | ||
|
|
3f72418773 | ||
|
|
daaf40fd96 | ||
|
|
1b819493cb | ||
|
|
65775228b3 | ||
|
|
ca9b53df5c | ||
|
|
0b5c0cd617 | ||
|
|
f8a87542eb | ||
|
|
1058c3e904 | ||
|
|
23373994a3 | ||
|
|
0c7a266476 | ||
|
|
a6bd8f49fa | ||
|
|
3d39fbf4fa | ||
|
|
b4f9bd596e | ||
|
|
be8c66d768 | ||
|
|
454ef459f5 | ||
|
|
b6f4114f63 | ||
|
|
51dc8a8ad1 | ||
|
|
f107909b0e | ||
|
|
7d621412e5 | ||
|
|
fde6d7e0de | ||
|
|
7cef26be19 | ||
|
|
e753c343be | ||
|
|
922f54fafe | ||
|
|
cae469fa55 | ||
|
|
aec033163e | ||
|
|
c65598710a | ||
|
|
73008d958d | ||
|
|
ac8852c894 | ||
|
|
0bddeee5b3 | ||
|
|
1e3a253015 | ||
|
|
56e16ae732 | ||
|
|
03cfa559c7 | ||
|
|
7222296d60 | ||
|
|
162ec4a0b3 | ||
|
|
63781076d3 | ||
|
|
eb5fff6939 | ||
|
|
4212425427 | ||
|
|
59adce11a1 | ||
|
|
f4f61e687c | ||
|
|
5a00501a77 | ||
|
|
cb8c0f172b | ||
|
|
4e5aebf8a5 | ||
|
|
08cd73d170 | ||
|
|
da2f06a566 | ||
|
|
ebe3d3df7b | ||
|
|
365d7fd062 | ||
|
|
0e01936f50 | ||
|
|
26870e89da | ||
|
|
055c22d59f | ||
|
|
08c3871987 | ||
|
|
10f297fac5 | ||
|
|
e2310f12e0 | ||
|
|
5152323676 | ||
|
|
63a7db8692 | ||
|
|
3910127690 | ||
|
|
5afda1ff50 | ||
|
|
d26b1ef955 | ||
|
|
1eb01c32f5 | ||
|
|
4925d7cf88 | ||
|
|
14bf1027d6 | ||
|
|
6771b317b9 | ||
|
|
c09b7a9fe9 | ||
|
|
eb3141c5fd | ||
|
|
25c0657a54 | ||
|
|
f2e0040eda | ||
|
|
94d4180995 | ||
|
|
bb0540329c | ||
|
|
4f3fcbe03c | ||
|
|
e3129b9b9e | ||
|
|
d17401ebc1 | ||
|
|
049757aa30 | ||
|
|
dc72c11c6f | ||
|
|
1758f35fa5 | ||
|
|
39f5467825 | ||
|
|
9b5dd9d374 | ||
|
|
385fd0fd8a | ||
|
|
abf5e48f0c | ||
|
|
4050343c12 | ||
|
|
cd1c73834c | ||
|
|
d0d1321482 | ||
|
|
f2b5230640 | ||
|
|
d489fb443b | ||
|
|
0b605cf215 | ||
|
|
d37050b85e | ||
|
|
1742e229bf | ||
|
|
c57f4ae587 | ||
|
|
d3cb75b4aa | ||
|
|
3c8eca8e37 | ||
|
|
f58f380482 | ||
|
|
fea9d2f3b1 | ||
|
|
070ee66f5d | ||
|
|
8a40b4f53f | ||
|
|
602e965813 | ||
|
|
433d33f545 | ||
|
|
0e1cd140f4 | ||
|
|
a2e25a27e9 | ||
|
|
40304ccf44 | ||
|
|
3eec8ffdf7 | ||
|
|
9198e61791 | ||
|
|
3b98850326 | ||
|
|
a55a1f8390 | ||
|
|
0d62f1ef26 | ||
|
|
3004199d0d | ||
|
|
265252f317 | ||
|
|
055ec17fdb | ||
|
|
83485bbdc8 | ||
|
|
73589bb775 | ||
|
|
7d3c0cb928 | ||
|
|
e4b72bf9c8 | ||
|
|
58d9078309 | ||
|
|
7c850118e9 | ||
|
|
52bcbf7130 | ||
|
|
fcedf023e9 | ||
|
|
8efde4ce75 | ||
|
|
f2fdc563eb | ||
|
|
17def48a3e | ||
|
|
b011c4167d | ||
|
|
47d7d52bcf | ||
|
|
e6aba0a0c9 | ||
|
|
b3736143bc | ||
|
|
8fac8b78f6 | ||
|
|
7b2db01105 | ||
|
|
293948d189 | ||
|
|
a6a301e855 | ||
|
|
43d916b56a | ||
|
|
d36f82f39b | ||
|
|
2e43b023aa | ||
|
|
4b4ffe09a8 | ||
|
|
ce3e12064b | ||
|
|
bbf7cabc35 | ||
|
|
42028f3e33 | ||
|
|
164bac13c1 | ||
|
|
22abc226aa | ||
|
|
e29b70e4ee | ||
|
|
45c38ff7ee | ||
|
|
14b282bcab | ||
|
|
76df639e42 | ||
|
|
a102dd7e32 | ||
|
|
1b5d8ff44b | ||
|
|
084c0e1036 | ||
|
|
c7e41f41a2 | ||
|
|
bfb80f6889 | ||
|
|
ae2a9d7c72 | ||
|
|
8505158a2f | ||
|
|
1a5bb28891 | ||
|
|
36d64d3151 | ||
|
|
919e5341c3 | ||
|
|
2420217ede | ||
|
|
d2d76a7e92 | ||
|
|
2817a5bd34 | ||
|
|
fdca271289 | ||
|
|
3331f37d82 | ||
|
|
e326d5fe15 | ||
|
|
347e26c9c1 | ||
|
|
9b95480ce4 | ||
|
|
c5078b73be | ||
|
|
eb0610ad62 | ||
|
|
edf116a9b2 | ||
|
|
a5ce165bda | ||
|
|
1f84c64c08 | ||
|
|
302a4621a6 | ||
|
|
7b55ca889d | ||
|
|
2e765654cf | ||
|
|
98a03439f1 | ||
|
|
e4fce66a62 | ||
|
|
904dd7f190 | ||
|
|
aca894c240 | ||
|
|
21b7d4e7ce | ||
|
|
8fe172079c | ||
|
|
59f27d4740 | ||
|
|
579f5ae8d5 | ||
|
|
74c0f407df | ||
|
|
a94782571a | ||
|
|
486b969bea | ||
|
|
5e6618c495 | ||
|
|
408ec2466f | ||
|
|
28af133a7e | ||
|
|
0ef3da0bca | ||
|
|
aaeb0390cc | ||
|
|
ef3e45b742 | ||
|
|
cf9f950945 | ||
|
|
88d9ed5c55 | ||
|
|
39c3291826 | ||
|
|
4b99e2a057 | ||
|
|
96fb57109a | ||
|
|
f773204794 | ||
|
|
dcf2df88bb | ||
|
|
e95ec249f3 | ||
|
|
d9bc044bdc | ||
|
|
12bdf02770 | ||
|
|
20d099fcb9 | ||
|
|
66a5863682 | ||
|
|
4d061f6645 | ||
|
|
0959ff5b7c | ||
|
|
009a3ecabb | ||
|
|
7486484b0d | ||
|
|
6b6321b5da | ||
|
|
11443ab0ea | ||
|
|
6cda4be691 | ||
|
|
d6c338e949 | ||
|
|
8a2ad04880 | ||
|
|
c35a78336e | ||
|
|
297a894a64 | ||
|
|
8271630d89 | ||
|
|
2867f28458 | ||
|
|
f3ee6e5211 | ||
|
|
aaa5e821c4 | ||
|
|
d8b00b3fad | ||
|
|
369c6b39fc | ||
|
|
16a2040470 | ||
|
|
190d75912c | ||
|
|
01f85fd766 | ||
|
|
15c8ba4618 | ||
|
|
691a9f28fa | ||
|
|
9b4e870c4b | ||
|
|
1a17b493aa | ||
|
|
a8aa097581 | ||
|
|
c8c1106757 | ||
|
|
7bf8a9184c | ||
|
|
105d88608c | ||
|
|
a41da5fb4c | ||
|
|
8179827666 | ||
|
|
4cab1c68e4 | ||
|
|
4d09959630 | ||
|
|
d559ef8432 | ||
|
|
e81dfd1d8c | ||
|
|
83fa56ad0d | ||
|
|
498428aad7 | ||
|
|
f6f4843daf | ||
|
|
3d47fa2d29 | ||
|
|
b19395d8a1 | ||
|
|
70c33ff7df | ||
|
|
84b9a879e0 | ||
|
|
a3818cc84d | ||
|
|
d489672220 | ||
|
|
38da0df92e | ||
|
|
f19f534059 | ||
|
|
f695069007 | ||
|
|
74bd918f63 | ||
|
|
e48738a40d | ||
|
|
ceed3fb350 | ||
|
|
865ff915c4 | ||
|
|
4ebf767ba1 | ||
|
|
e6d3180a82 | ||
|
|
cf3badcfc6 | ||
|
|
7ed7983520 | ||
|
|
5a10d6976a | ||
|
|
2fb0ad7b35 | ||
|
|
fe3cb79ebb | ||
|
|
646e2fc0a1 | ||
|
|
7f9f20fc4d | ||
|
|
a90190e869 | ||
|
|
ff146de84e | ||
|
|
dd52447969 | ||
|
|
9e7b2eed1f | ||
|
|
a079da904f | ||
|
|
7844ddaba7 | ||
|
|
c41e4b9a3c | ||
|
|
719d69150b | ||
|
|
0109888967 | ||
|
|
57501f5628 | ||
|
|
33e79d9c67 | ||
|
|
0df61bcc43 | ||
|
|
0cf93d9d12 | ||
|
|
a5062e8ea3 | ||
|
|
dc5b317ba4 | ||
|
|
5764ac6185 | ||
|
|
27502947b4 | ||
|
|
d77e517dd4 | ||
|
|
f2a1af112f | ||
|
|
46101d972b | ||
|
|
f55673b391 | ||
|
|
83561cc68a | ||
|
|
9c47313933 | ||
|
|
d2d8194131 | ||
|
|
bafa08166f | ||
|
|
ff0d4341f9 | ||
|
|
040babdcb5 | ||
|
|
f54cba2f99 | ||
|
|
4c56380f4b | ||
|
|
275918835b | ||
|
|
79760a2650 | ||
|
|
5273a113f5 | ||
|
|
f727a959d7 | ||
|
|
5e8b6894a4 | ||
|
|
71ef2cb483 | ||
|
|
b2ccd72cfd | ||
|
|
7c2ae8f4e4 | ||
|
|
4bb9a04bb0 | ||
|
|
66dbeec51e | ||
|
|
5c886af397 | ||
|
|
d3589e5cb3 | ||
|
|
189bb60232 | ||
|
|
f8fa4e00a5 | ||
|
|
17e0de9f8b | ||
|
|
fdd20fc29f | ||
|
|
3732e2e2d5 | ||
|
|
c9113f1760 | ||
|
|
29ad20dd6f | ||
|
|
26f77a67c0 | ||
|
|
87474c56dd | ||
|
|
9097e47063 | ||
|
|
b0ecddab06 | ||
|
|
dc6d23b909 | ||
|
|
0eb3d35862 | ||
|
|
d5d570db90 | ||
|
|
d5f09d03ce | ||
|
|
1195c51991 | ||
|
|
00784561f3 | ||
|
|
a401cd3394 | ||
|
|
1cbca3db26 | ||
|
|
237a8dcc71 | ||
|
|
9c2dc9030b | ||
|
|
22faf2b4a0 | ||
|
|
8c69c532f9 | ||
|
|
9d883e0e46 | ||
|
|
284c31f27f | ||
|
|
fde6c91ae1 | ||
|
|
77913e76a6 | ||
|
|
b5b0ef16a6 | ||
|
|
5b80df0232 | ||
|
|
34178f0d63 | ||
|
|
b3730bd720 | ||
|
|
5de72e7061 | ||
|
|
528c8678e1 | ||
|
|
4de638d69a | ||
|
|
604d0c7e2f | ||
|
|
0e7a059daf | ||
|
|
53047a65a6 | ||
|
|
10399c5deb | ||
|
|
ad42cf151e | ||
|
|
b20f2c3828 | ||
|
|
08e7b5c6bc | ||
|
|
4e7749afaf | ||
|
|
5c4d07c7f9 | ||
|
|
47d61d3b2f | ||
|
|
935ee7f954 | ||
|
|
579e36b23c | ||
|
|
920156c55f | ||
|
|
fe16253e47 | ||
|
|
2243656cf2 | ||
|
|
b40592fa95 | ||
|
|
5e1395570d | ||
|
|
4c6ca4e0c3 | ||
|
|
f58288ae73 | ||
|
|
77c5be55b4 | ||
|
|
6684219e9f | ||
|
|
277b5495f1 | ||
|
|
f0ec7321f3 | ||
|
|
30a5457bf6 | ||
|
|
917535d3b0 | ||
|
|
283b0a886c | ||
|
|
9f262dc605 | ||
|
|
5d20deddf1 | ||
|
|
714381884f | ||
|
|
72068dcda9 | ||
|
|
1b19dbf460 | ||
|
|
161a7ce3ae | ||
|
|
f820ab55d5 | ||
|
|
3365d512f5 | ||
|
|
a506576fd8 | ||
|
|
37b769b0d8 | ||
|
|
935887cce7 | ||
|
|
68d79694a9 | ||
|
|
c06bbb9795 | ||
|
|
a43d0d7050 | ||
|
|
ce75e1d79d | ||
|
|
4bea75104a | ||
|
|
8e0a53b25e | ||
|
|
9bf01164f6 | ||
|
|
8c3d5dca92 | ||
|
|
660782b418 | ||
|
|
65b912fce7 | ||
|
|
958f99b802 | ||
|
|
d0c3294314 | ||
|
|
3770274912 | ||
|
|
9cfc2be01d | ||
|
|
dad5871180 | ||
|
|
0d74b73f53 | ||
|
|
9f0aee8820 | ||
|
|
21de09c0ef | ||
|
|
cd6fdb721c | ||
|
|
04fa44d8ca | ||
|
|
afda997453 | ||
|
|
42b81cfbbc | ||
|
|
fe92d9d581 | ||
|
|
f31aed8925 | ||
|
|
32b9b39b29 | ||
|
|
6617f1665f | ||
|
|
e11f4c714f | ||
|
|
27b6a85067 | ||
|
|
560f21c006 | ||
|
|
28a02e2f2d | ||
|
|
8c4a9c42d4 | ||
|
|
7cd5c905a4 | ||
|
|
ac7aedd703 | ||
|
|
ad9d0ae022 | ||
|
|
9039cdbb68 | ||
|
|
2c48dca135 | ||
|
|
cd155871ce | ||
|
|
598b395d96 | ||
|
|
8243a5cf97 | ||
|
|
9fd1ab1d11 | ||
|
|
1ec6339cf2 | ||
|
|
b0ef55251a | ||
|
|
5b4212391e | ||
|
|
bf96870221 | ||
|
|
a8a22e01e3 | ||
|
|
825c3db14d | ||
|
|
22813311d6 | ||
|
|
4fda3e9406 | ||
|
|
0b4563b0a9 | ||
|
|
d9e2283116 | ||
|
|
8b90da65d7 | ||
|
|
f376fc66ac | ||
|
|
a77ec61c64 | ||
|
|
26e8141ba2 | ||
|
|
4d64da0c05 | ||
|
|
ac525f259f | ||
|
|
8acb6e3a9d | ||
|
|
8ee5a1e543 | ||
|
|
9d651a122f | ||
|
|
8905b670da | ||
|
|
342bb070da | ||
|
|
a0d084ffba | ||
|
|
b463c314fe | ||
|
|
235f0afaaf | ||
|
|
f8b155e37f | ||
|
|
225666b31e | ||
|
|
709b693a1a | ||
|
|
b44709ff5a | ||
|
|
4e67763f67 | ||
|
|
6543d72499 | ||
|
|
2749687509 | ||
|
|
42c93aa2dc | ||
|
|
0e22056e48 | ||
|
|
a077b2c10f | ||
|
|
f31ea2eb74 | ||
|
|
941d105745 | ||
|
|
18bf9ed314 | ||
|
|
3c2a77dfcd | ||
|
|
f4dda811c8 | ||
|
|
d8e1fa0cd8 | ||
|
|
e37bbc32ec | ||
|
|
18c000a44e | ||
|
|
3d8c0e93d6 | ||
|
|
cc32cc11ac | ||
|
|
bb316e66b3 | ||
|
|
95ff16137a | ||
|
|
2b493faeb8 | ||
|
|
d9c32b8947 | ||
|
|
724bac6159 | ||
|
|
6c320a2290 | ||
|
|
bf1ce6c67b | ||
|
|
cd0c280cb5 | ||
|
|
2e8d950775 | ||
|
|
65d8d63733 | ||
|
|
4d1d84779d | ||
|
|
5f914c191c | ||
|
|
6e5f3389e2 | ||
|
|
79d1e94f3d | ||
|
|
c8ffb4a55e | ||
|
|
c4946486ba | ||
|
|
0f60f46165 | ||
|
|
61e74e37f7 | ||
|
|
ad487d8ed5 | ||
|
|
ccffa29234 | ||
|
|
349e79c9fb | ||
|
|
dbc08bbf92 | ||
|
|
e0b3d5ab5a | ||
|
|
5c721a1921 | ||
|
|
00691dd53a | ||
|
|
fa7bc0a9ed | ||
|
|
43902995e7 | ||
|
|
ee45b90c2a | ||
|
|
b6856f9a8d | ||
|
|
813e8ccd3f | ||
|
|
2ee7ef8f93 | ||
|
|
68b3545322 | ||
|
|
d2ceafe36c | ||
|
|
3d955fffd7 | ||
|
|
4a5ed0397c | ||
|
|
1442752911 | ||
|
|
082086e3e8 | ||
|
|
6e1443b69b | ||
|
|
6ab4fcf220 | ||
|
|
5094eaad97 | ||
|
|
eb402385ca | ||
|
|
3bd87a904e | ||
|
|
0bcdf90344 | ||
|
|
6443c0f3b5 | ||
|
|
8d1c362e4d | ||
|
|
84c5bb5f8b | ||
|
|
a17019cd6a | ||
|
|
0999b1892f | ||
|
|
40c4b73ea0 | ||
|
|
fc13bbdc72 | ||
|
|
3d43ada7f3 | ||
|
|
15db87fa3e | ||
|
|
1286392de9 | ||
|
|
0759c26fe9 | ||
|
|
28a70669af | ||
|
|
ddfbc81b91 | ||
|
|
138c5ae4ec | ||
|
|
1700c43a33 | ||
|
|
e30fcc0ef2 | ||
|
|
573e6fb3c1 | ||
|
|
8416cb8d59 | ||
|
|
46defa07e4 | ||
|
|
747c0e5531 | ||
|
|
ca243ef999 | ||
|
|
4a7e19c749 | ||
|
|
cfb7c7a6f5 | ||
|
|
54dfa00cb8 | ||
|
|
8ec2a445b7 | ||
|
|
1bb559bb14 | ||
|
|
916764ada1 | ||
|
|
176d01204d | ||
|
|
b3da920da5 | ||
|
|
64bcdc4bef | ||
|
|
8e2691b39a | ||
|
|
2fa15538f4 | ||
|
|
538a17254f | ||
|
|
9135c2076e | ||
|
|
84fdc83068 | ||
|
|
b411642af3 | ||
|
|
eb3ba4e9f5 | ||
|
|
2ec4e1dbe3 | ||
|
|
c3447cba4b | ||
|
|
85624fc15c | ||
|
|
ebdbcdf4f9 | ||
|
|
458f8c8be3 | ||
|
|
ff717774b8 | ||
|
|
af2eece4f4 | ||
|
|
e304656da6 | ||
|
|
9b8deb82fe | ||
|
|
c44ea638a6 | ||
|
|
174a6aaae9 | ||
|
|
ebff2385b5 | ||
|
|
8bff1d0ec4 | ||
|
|
9307f72b99 | ||
|
|
929e2ec42d | ||
|
|
96f9b9d3d5 | ||
|
|
568094d911 | ||
|
|
d662881112 | ||
|
|
e68187ff6b | ||
|
|
9569d33b98 | ||
|
|
cfc5171a84 | ||
|
|
ed5bf3efac | ||
|
|
0ec3a37660 | ||
|
|
60b3d83cab | ||
|
|
2fd781b4d7 | ||
|
|
beaf398a4e | ||
|
|
99e39218a6 | ||
|
|
3e14ab8c32 | ||
|
|
19904a31bb | ||
|
|
ae031e3793 | ||
|
|
f6daf71b46 | ||
|
|
e4d814b4f9 | ||
|
|
94bd2f3701 | ||
|
|
95a1dd985f | ||
|
|
13447f8050 | ||
|
|
6601d961f4 | ||
|
|
ad02fc8df3 | ||
|
|
214ca783cc | ||
|
|
e7cedef90a | ||
|
|
792c1b02dc | ||
|
|
f6786add26 | ||
|
|
76140cdad3 | ||
|
|
71ce97432e | ||
|
|
aad871cf28 | ||
|
|
886940f46d | ||
|
|
a5b0ce0974 | ||
|
|
24283aeff7 | ||
|
|
b6775b59b2 | ||
|
|
6422964468 | ||
|
|
c186f2cdc8 | ||
|
|
70dd948ffc | ||
|
|
c4f62feb03 | ||
|
|
2ee25b0a21 | ||
|
|
ad39171be5 | ||
|
|
5cc48a4d7b | ||
|
|
f7c85c97dd | ||
|
|
7484be7656 | ||
|
|
240cf0d226 | ||
|
|
4a859bef48 | ||
|
|
4c9430c377 | ||
|
|
a21554f6d1 | ||
|
|
4d44ac1103 | ||
|
|
ce3917941e | ||
|
|
c69b5512d3 | ||
|
|
e40e3cb0cb | ||
|
|
ad59f4e03f | ||
|
|
b794133b1a | ||
|
|
17d641ef4c | ||
|
|
264f3b7626 | ||
|
|
d53bf785ac | ||
|
|
50ff2802d9 | ||
|
|
23662f6165 | ||
|
|
ca3f7e76fc | ||
|
|
affdc20a65 | ||
|
|
67c5fd1847 | ||
|
|
39e2e7712f | ||
|
|
249f8568db | ||
|
|
a94ec1cd2a | ||
|
|
380a55d574 | ||
|
|
c0cc4bdb2f | ||
|
|
9b4e464c94 | ||
|
|
1da3262bec | ||
|
|
5ec80daab9 | ||
|
|
a523decb65 | ||
|
|
a16d777fc4 | ||
|
|
4324aa50b1 | ||
|
|
8cd92ee71f | ||
|
|
99c9f5f134 | ||
|
|
df6262ba26 | ||
|
|
0e8bf17f0b | ||
|
|
ac86c3fcfe | ||
|
|
5d24f6ca19 | ||
|
|
e8486ba140 | ||
|
|
7374c9639e | ||
|
|
e60525fee1 | ||
|
|
1152befcf1 | ||
|
|
a1be422ad6 | ||
|
|
8dbd81bb4c | ||
|
|
aacacfed04 | ||
|
|
9a28c2fb86 | ||
|
|
7518a8c851 | ||
|
|
c02100cc89 | ||
|
|
9e4d1d33e2 | ||
|
|
cd367c9d2b | ||
|
|
212d407bb3 | ||
|
|
05422437df | ||
|
|
ec408c80f1 | ||
|
|
377160a742 | ||
|
|
3c6fcd2665 | ||
|
|
1a72acf12e | ||
|
|
ac88bec8fb | ||
|
|
44b5c6221b | ||
|
|
034d957be4 | ||
|
|
e9f6987324 | ||
|
|
5f42e2a0b5 | ||
|
|
288b900350 | ||
|
|
0e91901fcf | ||
|
|
63566a6b8e | ||
|
|
6540d6b4c7 | ||
|
|
a9de91e40d | ||
|
|
b81996212a | ||
|
|
6d8a6e8a2f | ||
|
|
b076bd2981 | ||
|
|
8cf8a8efaf | ||
|
|
1a1c3d8157 | ||
|
|
afd5bbeda1 | ||
|
|
9e0b7c37dd | ||
|
|
67864db59b | ||
|
|
ed56eb4174 | ||
|
|
1fd4628b01 | ||
|
|
55d89657dc | ||
|
|
a88bf736bc | ||
|
|
9d2bd2f255 | ||
|
|
6960ca4649 | ||
|
|
8e68ea7b46 | ||
|
|
556bd2cff0 | ||
|
|
aeed2d709e | ||
|
|
55f00756ce | ||
|
|
578fbf54c4 | ||
|
|
932bb97afc | ||
|
|
4c894f5d20 | ||
|
|
6e2f4697aa | ||
|
|
5699bc082d | ||
|
|
4c91972105 | ||
|
|
7bee9cd72f | ||
|
|
bbfd3a2946 | ||
|
|
9d5f0f6f5d | ||
|
|
8bd67a6cd8 | ||
|
|
73079d7989 | ||
|
|
029d419e64 | ||
|
|
219317632e | ||
|
|
6eaaa46779 | ||
|
|
126aa46196 | ||
|
|
d6b761c341 | ||
|
|
341edad850 | ||
|
|
5d1d48b2d2 | ||
|
|
2ece968c75 | ||
|
|
6263863395 | ||
|
|
9748a5a35c | ||
|
|
85084a59d4 | ||
|
|
a6446fb58c | ||
|
|
9a646b6f01 | ||
|
|
074185a235 | ||
|
|
e75f7b1c3e | ||
|
|
6f026e0c1d | ||
|
|
6cb7277cd4 | ||
|
|
d74e31756b | ||
|
|
45e3f391b4 | ||
|
|
8999dac823 | ||
|
|
7492f5a31f | ||
|
|
e291ebbde8 | ||
|
|
0903d79edc | ||
|
|
d106ea1326 | ||
|
|
a3e51bd1bf | ||
|
|
b5c06e279e | ||
|
|
db36213039 | ||
|
|
e93b6d5257 | ||
|
|
53d5eab46a | ||
|
|
764f33fbee | ||
|
|
d283aeb939 | ||
|
|
b68a94efc3 | ||
|
|
27ae5c8058 | ||
|
|
708bc91f86 | ||
|
|
3708a25d0c | ||
|
|
2502ac48b2 | ||
|
|
6ad981043a | ||
|
|
0938a4be8a | ||
|
|
93b45f27a8 | ||
|
|
b119a78569 | ||
|
|
fda97254a0 | ||
|
|
e6de770389 | ||
|
|
275134d134 | ||
|
|
55ae33e602 | ||
|
|
41e7425292 | ||
|
|
10649da46d | ||
|
|
efb44af530 | ||
|
|
09e5fae8bd | ||
|
|
3b850f7efd | ||
|
|
2759459920 | ||
|
|
2d3527b38a | ||
|
|
740282e755 | ||
|
|
690266044e | ||
|
|
42bdbf01b0 | ||
|
|
bf0e190b0b | ||
|
|
023e219143 | ||
|
|
908cfdc2ed | ||
|
|
84681caf33 | ||
|
|
def7cb206c | ||
|
|
a37f5e918e | ||
|
|
0ab8cc16d6 | ||
|
|
6cc58314a8 | ||
|
|
823694bfdd | ||
|
|
6cb159352d | ||
|
|
c17f4db5c0 | ||
|
|
d2a37f73ee | ||
|
|
f08eea1d62 | ||
|
|
cedaa8f156 | ||
|
|
07c580e45d | ||
|
|
e0aac7e0b5 | ||
|
|
d48883e7fb | ||
|
|
2bbf5b8187 | ||
|
|
8f4b963096 | ||
|
|
e480492594 | ||
|
|
564b2fda57 | ||
|
|
cb4c8acfd1 | ||
|
|
8dbd306657 | ||
|
|
7b1b7c1737 | ||
|
|
13620ed535 | ||
|
|
076a546df6 | ||
|
|
0ccc36dc07 | ||
|
|
3d07cd9a69 | ||
|
|
f14b3edfe0 | ||
|
|
d0d1c15a55 | ||
|
|
000b0c6fe9 | ||
|
|
a86b6615d6 | ||
|
|
a728fab273 | ||
|
|
20d66e020a | ||
|
|
0d8a7c924d | ||
|
|
dff7a1d012 | ||
|
|
78c9b5b919 | ||
|
|
b1bd07ab9e | ||
|
|
dc0a7df625 | ||
|
|
a2aa100178 | ||
|
|
ef7198b9c8 | ||
|
|
7c5a57a7b2 | ||
|
|
91ab737ff3 | ||
|
|
2fcb6d8170 | ||
|
|
2cfc98a670 | ||
|
|
35ada5c545 | ||
|
|
386f0fd966 | ||
|
|
8c60d7479a | ||
|
|
3e7fda40f2 | ||
|
|
d0f73e3997 | ||
|
|
b7a43cd434 | ||
|
|
843cc4a913 | ||
|
|
e82ef65134 | ||
|
|
3a921c1c96 | ||
|
|
ca26d57b4b | ||
|
|
5d20cf73f6 | ||
|
|
4431e316ce | ||
|
|
366708c7ef | ||
|
|
004692ca84 | ||
|
|
63857bde9d | ||
|
|
fbb8744558 | ||
|
|
e9eecd7892 | ||
|
|
2dfea51e35 | ||
|
|
e8163181e3 | ||
|
|
296d30d66c | ||
|
|
9b9236751c | ||
|
|
5d07ec19c7 | ||
|
|
9409a2bf42 | ||
|
|
3929511437 | ||
|
|
e23bc368d8 | ||
|
|
13f0ba5bd2 | ||
|
|
15bdd314ca | ||
|
|
510f6c75e9 | ||
|
|
f2fdb777cf | ||
|
|
14cd1cee26 | ||
|
|
4464889c71 | ||
|
|
6059c36663 | ||
|
|
f8f92c9b4d | ||
|
|
5c88ba2f65 | ||
|
|
6767e95a89 | ||
|
|
d3b54cb032 | ||
|
|
36ea5ff352 | ||
|
|
f4b01a6ffd | ||
|
|
95269795e8 | ||
|
|
c2ebc725ee | ||
|
|
e713f982f0 | ||
|
|
1e100746ba | ||
|
|
a6f2a043d1 | ||
|
|
20846db182 | ||
|
|
4fce7cc3af | ||
|
|
93fa9c0156 | ||
|
|
3ad0489eb2 | ||
|
|
63f95d7b68 | ||
|
|
fe600f7011 | ||
|
|
3dc33e2bc6 | ||
|
|
73e813a0b7 | ||
|
|
9fe07187fb | ||
|
|
36969b0a11 | ||
|
|
c9e64e3b14 | ||
|
|
0d73d1950f | ||
|
|
765d3faeb0 | ||
|
|
f6117dc228 | ||
|
|
7aa5a8ee4e | ||
|
|
7150f471f1 | ||
|
|
7621aa1cdc | ||
|
|
ab73700691 | ||
|
|
1e004a7b2f | ||
|
|
a19a1cdec1 | ||
|
|
01ec31624f | ||
|
|
01f1beaecf | ||
|
|
a3e17d84aa | ||
|
|
2c23b9f7fd | ||
|
|
0cdf9c63f7 | ||
|
|
542ab75196 | ||
|
|
06f04e8f0b | ||
|
|
2a9fb562be | ||
|
|
2de846edca | ||
|
|
0564b104c8 | ||
|
|
c85f7eebe7 | ||
|
|
a4d722762e | ||
|
|
f4af6d1c60 | ||
|
|
74b978675b | ||
|
|
d4fc6f1ada | ||
|
|
8766be3f56 | ||
|
|
b179939a83 | ||
|
|
1d1faa7bbf | ||
|
|
5ccc23a15b | ||
|
|
a04c487b92 | ||
|
|
952ff5a4b4 | ||
|
|
4a6f49976f | ||
|
|
dd7de29e19 | ||
|
|
99b941392d | ||
|
|
e7fe125e95 | ||
|
|
3f447ae0d3 | ||
|
|
bea64d56b3 | ||
|
|
951569e8f2 | ||
|
|
42e717d035 | ||
|
|
9c9f8a8ad0 | ||
|
|
470fb254cd | ||
|
|
6e405f3b08 | ||
|
|
38a726afb0 | ||
|
|
090210a930 | ||
|
|
b3af639f9c | ||
|
|
c78ff1d1d4 | ||
|
|
ff79e499a9 | ||
|
|
e44578c4a3 | ||
|
|
58ec5b2c01 | ||
|
|
920fe54a3d | ||
|
|
0a19b97c1b | ||
|
|
d530dfe241 | ||
|
|
d488409742 | ||
|
|
5ade7ee4db | ||
|
|
1edef5fa57 | ||
|
|
f88009b0b3 | ||
|
|
6a3dde4ba0 | ||
|
|
1c3dc9e22e | ||
|
|
5edc2a4cc8 | ||
|
|
b50a704260 | ||
|
|
f351270c6f | ||
|
|
47be8c1e9e | ||
|
|
183078baff | ||
|
|
072551a3eb | ||
|
|
0765becb6d | ||
|
|
cb6463ef09 | ||
|
|
acb1f92e71 | ||
|
|
c9aa5c7151 | ||
|
|
df94b062da | ||
|
|
a486561f77 | ||
|
|
67249d24ea | ||
|
|
4a06f77803 | ||
|
|
df45c0af90 | ||
|
|
deb0f20ecd | ||
|
|
97c12e3f11 | ||
|
|
cd1ea3a76e | ||
|
|
5e9b647b3c | ||
|
|
df1479740f | ||
|
|
4ce14db645 | ||
|
|
4cd7a46c35 | ||
|
|
90c701a3d5 | ||
|
|
c4b2797ea8 | ||
|
|
6987c5e922 | ||
|
|
4172933d7f | ||
|
|
cf78aa4a04 | ||
|
|
194ad1aa89 | ||
|
|
4ca8c2ec36 | ||
|
|
f159b772c3 | ||
|
|
97408cc1d5 | ||
|
|
0bd5a93194 | ||
|
|
3214fc193e | ||
|
|
93e0ba9a4b | ||
|
|
7ae36b72e6 | ||
|
|
b0212de4d4 | ||
|
|
d1135529d1 | ||
|
|
dfbc96a018 | ||
|
|
57c29eceab | ||
|
|
364a73d137 | ||
|
|
02de81775c | ||
|
|
eb44dad0f3 | ||
|
|
68ebd4bd0f | ||
|
|
764d365b0a | ||
|
|
4add45f4ab | ||
|
|
61eb47d472 | ||
|
|
4e2582d250 | ||
|
|
ebcdc39c77 | ||
|
|
77a75ade85 | ||
|
|
9ff04b3627 | ||
|
|
14a7ebd1d4 | ||
|
|
b43305d50a | ||
|
|
b9b3770593 | ||
|
|
b5fdaeabeb | ||
|
|
4772b9e82c | ||
|
|
1aafa79ea4 | ||
|
|
3c5bd90a08 | ||
|
|
25157d32ba | ||
|
|
479a3f4099 | ||
|
|
cf789ae1d5 | ||
|
|
72a6768d8f | ||
|
|
65ec3aec83 | ||
|
|
a50ce4c396 | ||
|
|
5753866874 | ||
|
|
4a0283575f | ||
|
|
02e8b8510a | ||
|
|
4955643421 | ||
|
|
8caf5a8727 | ||
|
|
13fd5e17a9 | ||
|
|
a0c72fd78b | ||
|
|
ffa709c463 | ||
|
|
fa41a2bc2d | ||
|
|
02b74481d7 | ||
|
|
bf3e5b5660 | ||
|
|
ca281950c1 | ||
|
|
db609ce972 | ||
|
|
97745aa237 | ||
|
|
3d6e8d70f5 | ||
|
|
f3d8b87045 | ||
|
|
6c0585834f | ||
|
|
1bb0686aff | ||
|
|
f14b8d1375 | ||
|
|
e5b7bd1d10 | ||
|
|
f82fc86414 | ||
|
|
f1c5798eab | ||
|
|
2b4e273a05 | ||
|
|
e3fea0227f | ||
|
|
c42dccbba7 | ||
|
|
f0e137b9b6 | ||
|
|
7c27686c72 | ||
|
|
d1a8d1ea7c | ||
|
|
9258f8fbea | ||
|
|
60dbf521fc | ||
|
|
f94d3af077 | ||
|
|
8c3dc2f57a | ||
|
|
31f5c88e2a | ||
|
|
3a890e0794 | ||
|
|
f75ec7aa20 | ||
|
|
bb0b35d92e | ||
|
|
a2a080c560 | ||
|
|
7f82ef2d3c | ||
|
|
64f9338ce7 | ||
|
|
03e8d0223d | ||
|
|
ca4b07193e | ||
|
|
8589026531 | ||
|
|
0935e05ed0 | ||
|
|
5b3e145c58 | ||
|
|
245e264451 | ||
|
|
560b161b5d | ||
|
|
f7539f948e | ||
|
|
90f3c5fed9 | ||
|
|
ad5748d2dd | ||
|
|
effe1ec89c | ||
|
|
7ffb1684a4 | ||
|
|
16d46acb7e | ||
|
|
516c74264f | ||
|
|
307cafc7ec | ||
|
|
2ecdf23042 | ||
|
|
a2c4aeb93b | ||
|
|
048652eabd | ||
|
|
4446bd852a | ||
|
|
f7903b42c0 | ||
|
|
4178ada3cc | ||
|
|
aac1e9562b | ||
|
|
ccd79ae26a | ||
|
|
698ec0eb7a | ||
|
|
17bc321c3a | ||
|
|
224f723640 | ||
|
|
e6a4743743 | ||
|
|
d3bf71083a | ||
|
|
0d3edd37be | ||
|
|
73859c855f | ||
|
|
b8dc5d880c | ||
|
|
389f04129c | ||
|
|
ad3392880f | ||
|
|
4dc434a32a | ||
|
|
80d4cacb06 | ||
|
|
0a73bb4ea3 | ||
|
|
29e7379b37 | ||
|
|
cecb520a5e | ||
|
|
2625cd446b | ||
|
|
08946ebc19 | ||
|
|
2e274e2d2c | ||
|
|
e855c40f9b | ||
|
|
74ad4469eb | ||
|
|
3d0437004e | ||
|
|
d96fe358a8 | ||
|
|
f602bb0430 | ||
|
|
9abd5b27cc | ||
|
|
9d54cbed8e | ||
|
|
53428d13da | ||
|
|
fdad6c7d74 | ||
|
|
8397378786 | ||
|
|
d4dc1a4f3c | ||
|
|
ac44288328 | ||
|
|
4f1c0f2e66 | ||
|
|
85766699c1 | ||
|
|
8eb8d11863 | ||
|
|
3e3be4307b | ||
|
|
8a05fdf438 | ||
|
|
48cd3d52aa | ||
|
|
345f18cff6 | ||
|
|
79802d120e | ||
|
|
1ce6cc8ed3 | ||
|
|
1c7222827c | ||
|
|
fa91686460 | ||
|
|
7e824b34d7 | ||
|
|
1a556e02b7 | ||
|
|
ce45f15775 | ||
|
|
ab45121773 | ||
|
|
c0f56d74d3 | ||
|
|
9c50d2b7e0 | ||
|
|
dedf988eb8 | ||
|
|
57f35855b4 | ||
|
|
e57ea7b495 | ||
|
|
0f179bc7d8 | ||
|
|
0767a0ed5a | ||
|
|
b7bc51f358 | ||
|
|
eefbaf460c | ||
|
|
6a1741c921 | ||
|
|
d2ba398587 | ||
|
|
de6e1d8149 | ||
|
|
a9c55a4964 | ||
|
|
304c2642b4 | ||
|
|
22e0a2b8db | ||
|
|
d81f057c18 | ||
|
|
bc9b3381dc | ||
|
|
854ec6e26d | ||
|
|
dd573f18d1 | ||
|
|
4fbdd18da9 | ||
|
|
b5c328fcfb | ||
|
|
2ba50cf15e | ||
|
|
dd7d4f8bb4 | ||
|
|
320fcd19fd | ||
|
|
796d3d894d | ||
|
|
ab98e2e737 | ||
|
|
818e90e2b8 | ||
|
|
73a7d9d655 | ||
|
|
bf813ccbcd | ||
|
|
6b2ec59efc | ||
|
|
cddfc53d2e | ||
|
|
981b564863 | ||
|
|
0212ef63ac | ||
|
|
2f8b09dd79 | ||
|
|
1dd1b8ec2e | ||
|
|
c944fbba4c | ||
|
|
bc21d68cf0 | ||
|
|
9cccc46dde | ||
|
|
17be3a6fac | ||
|
|
9f4b106270 | ||
|
|
3aca38a5eb | ||
|
|
32f68499f0 | ||
|
|
1fd9338ea8 | ||
|
|
b6e2dcad50 | ||
|
|
0ff1160d6f | ||
|
|
d72a4fadea | ||
|
|
1864bb13c5 | ||
|
|
23dc3e00ef | ||
|
|
dcfa86f366 | ||
|
|
2344409faa | ||
|
|
7c6a5ef891 | ||
|
|
ec0aaf7c91 | ||
|
|
4e2d3d9e46 | ||
|
|
607767151a | ||
|
|
ab10f8d46b | ||
|
|
7b3eb1e366 | ||
|
|
3272766ef0 | ||
|
|
8ef5bf8252 | ||
|
|
d17576a112 | ||
|
|
ab8949f37f | ||
|
|
485ce9850f | ||
|
|
e50c5f0079 | ||
|
|
0ebb61cdfc | ||
|
|
8092b32eb1 | ||
|
|
e4a83b590f | ||
|
|
a680775c79 | ||
|
|
7f378f4a1c | ||
|
|
03e5390d71 | ||
|
|
7a20f0a433 | ||
|
|
edb61e7383 | ||
|
|
5d9fd2fac6 | ||
|
|
2732488633 | ||
|
|
e24f8782c2 | ||
|
|
abe6588dfb | ||
|
|
d11fd43d71 | ||
|
|
dc72297270 | ||
|
|
7f4844646e | ||
|
|
c262c9d528 | ||
|
|
1ddf563e0a | ||
|
|
53b1436e68 | ||
|
|
3bc1575dd1 | ||
|
|
06d3f4a42d | ||
|
|
9e1cea2217 | ||
|
|
94727450ee | ||
|
|
8b497ab062 | ||
|
|
183e718289 | ||
|
|
d39e007eee | ||
|
|
c36765d095 | ||
|
|
53691c917d | ||
|
|
1ae85cb606 | ||
|
|
73dbf24912 | ||
|
|
8471ea4da6 | ||
|
|
45d1e418d0 | ||
|
|
81c3f2b07c | ||
|
|
117d38f182 | ||
|
|
0f549143f1 | ||
|
|
4871ed6fa7 | ||
|
|
633528e878 | ||
|
|
b44f5a8acf | ||
|
|
34d644435c | ||
|
|
029c8a682d | ||
|
|
b51e4e6da2 | ||
|
|
786cd66e09 | ||
|
|
c7cf3be804 | ||
|
|
a2994feac1 | ||
|
|
4d679b88e9 | ||
|
|
09b4f53846 | ||
|
|
cd08343d56 | ||
|
|
a3930c40a1 | ||
|
|
e21c22b7a5 | ||
|
|
afee68c01d | ||
|
|
06ee387ca4 | ||
|
|
6884799532 | ||
|
|
3bd2560c2a | ||
|
|
436a3fa340 | ||
|
|
a36af150a3 | ||
|
|
41a338338a | ||
|
|
19815f345f | ||
|
|
664e176e6d | ||
|
|
a0377fad29 | ||
|
|
a14c9933b2 | ||
|
|
51a7e4f2ab | ||
|
|
4926604bb2 | ||
|
|
be7522f1c9 | ||
|
|
ff5dd46daa | ||
|
|
c6f9e35319 | ||
|
|
a42e88b0c1 | ||
|
|
af97021e1a | ||
|
|
1874afb3b8 | ||
|
|
a6744438e0 | ||
|
|
25b5cf9e90 | ||
|
|
4211b276e2 | ||
|
|
af523e3355 | ||
|
|
ecde63f8ee | ||
|
|
898d237e1c | ||
|
|
01545db6c4 | ||
|
|
db1682f73d | ||
|
|
0256004bec | ||
|
|
410b93191b | ||
|
|
b3006183cc | ||
|
|
d011a9ae75 | ||
|
|
3313e0f7cf | ||
|
|
b3585f43e2 | ||
|
|
88fba710c1 | ||
|
|
db96e414b5 | ||
|
|
dd900ba2a7 | ||
|
|
851798d3fc | ||
|
|
96487e7b69 | ||
|
|
f715e66068 | ||
|
|
c785e5abdd | ||
|
|
a2a9fa447a | ||
|
|
bc57bf8c8b | ||
|
|
3830b53416 | ||
|
|
4ac6655d11 | ||
|
|
019d4c0ee1 | ||
|
|
b1c34af71c | ||
|
|
996188a871 | ||
|
|
5b04ccffc3 | ||
|
|
2ed9ff6df4 | ||
|
|
d4f83a97bb | ||
|
|
93d186ad42 | ||
|
|
55d615393e | ||
|
|
17f8d9e203 | ||
|
|
61c6044b60 | ||
|
|
3dc81cf9b0 | ||
|
|
9c7a015397 | ||
|
|
778e79b391 | ||
|
|
f372c834f1 | ||
|
|
ac1d59a35b | ||
|
|
863ca6b0ad | ||
|
|
0095ad5e8a | ||
|
|
b0cbab0d1c | ||
|
|
78f01e5d7b | ||
|
|
6a2717be04 | ||
|
|
beb304f9a0 | ||
|
|
05e7591218 | ||
|
|
14a4901808 | ||
|
|
503f716b1f | ||
|
|
4c88543f30 | ||
|
|
f38e8cad58 | ||
|
|
c3580f3874 | ||
|
|
d6c94f493f | ||
|
|
1c3896c991 | ||
|
|
e7f9bed8c6 | ||
|
|
20f0038734 | ||
|
|
d6e776cdcb | ||
|
|
e155a35ea2 | ||
|
|
60ddb620b3 | ||
|
|
ad62dea1b5 | ||
|
|
b9ecf6aa98 | ||
|
|
38c76b5e04 | ||
|
|
51513ac9be | ||
|
|
27f7415698 | ||
|
|
26d61d3a50 | ||
|
|
47a44a0ade | ||
|
|
b65f86b3fd | ||
|
|
f0b6dd9421 | ||
|
|
12dd0170c1 | ||
|
|
fce3bf2590 | ||
|
|
89cbb8ae12 | ||
|
|
7febc01d6a | ||
|
|
020619e94f | ||
|
|
8f2dce12ab | ||
|
|
c7f95d5a20 | ||
|
|
13c00d5655 | ||
|
|
81248ed0bd | ||
|
|
7e4371f482 | ||
|
|
c5cb55bf5e | ||
|
|
2b902fadd5 | ||
|
|
2c048602ce | ||
|
|
e7e1d3da77 | ||
|
|
b6999150bd | ||
|
|
6fda96e366 | ||
|
|
5f67d8db15 | ||
|
|
a505cd44cf | ||
|
|
03a6a992bc | ||
|
|
056a20c5b4 | ||
|
|
5d1fd6eaff | ||
|
|
13a9d5b2e1 | ||
|
|
9fd915575e | ||
|
|
dbe6a48541 | ||
|
|
763c3e67da | ||
|
|
6ab5e3db79 | ||
|
|
f302f3081a | ||
|
|
35503691a5 | ||
|
|
75e79c7988 | ||
|
|
dd08336865 | ||
|
|
72512f9d80 | ||
|
|
064a8a7fa9 | ||
|
|
b107d5dadf | ||
|
|
6850d8a92d | ||
|
|
31ae3b7d9e | ||
|
|
c19a506be7 | ||
|
|
a155f808d4 | ||
|
|
8bae623c40 | ||
|
|
55fff914e1 | ||
|
|
4eb615560f | ||
|
|
5e35877c1d | ||
|
|
fee75619de | ||
|
|
8d75ca3e2b | ||
|
|
9c829f1234 | ||
|
|
9e063a31cf | ||
|
|
a90b731b9d | ||
|
|
8b77317f3f | ||
|
|
4212bef5ff | ||
|
|
87dc6b6ac6 | ||
|
|
25960521ba | ||
|
|
d7e346db0c | ||
|
|
e4762e882b | ||
|
|
b2b7b1fedf | ||
|
|
18ee76903e | ||
|
|
d1b5b83464 | ||
|
|
d5f8b1bc03 | ||
|
|
d3061c14d6 | ||
|
|
f8929a86b6 | ||
|
|
89de6370b0 | ||
|
|
d767bcea84 | ||
|
|
793652caa2 | ||
|
|
e00e1b9298 | ||
|
|
47a912b407 | ||
|
|
7cb35cb36c | ||
|
|
6ae24a4f78 | ||
|
|
5ce0723c27 | ||
|
|
aaf906e2e7 | ||
|
|
3c541ab081 | ||
|
|
1e1b862ba6 | ||
|
|
1ab7044d22 | ||
|
|
dfdb150130 | ||
|
|
2ff034e580 | ||
|
|
05d7dff003 | ||
|
|
85c723d298 | ||
|
|
46bdfa2986 | ||
|
|
ca57cfafb2 | ||
|
|
0692132a3d | ||
|
|
e1c3900039 | ||
|
|
935df3adb4 | ||
|
|
82232aea17 | ||
|
|
f649946d25 | ||
|
|
8d04a49348 | ||
|
|
f88f96383d | ||
|
|
0739865153 | ||
|
|
c42798914d | ||
|
|
6a226785d2 | ||
|
|
aea46a7b42 | ||
|
|
a1db66cc36 | ||
|
|
db1d980a25 | ||
|
|
9c1441c4a7 | ||
|
|
2c74efde79 | ||
|
|
2f9a058b39 | ||
|
|
e84ae3b7d3 | ||
|
|
c18d05dea3 | ||
|
|
0190144cb2 | ||
|
|
00d6db435e | ||
|
|
4659c0f62b | ||
|
|
ac556bf6e3 | ||
|
|
c45ade924c | ||
|
|
ed5903d8e8 | ||
|
|
e8131f6c3a | ||
|
|
3117cccf77 | ||
|
|
9ab2f6139e | ||
|
|
6af7f1e336 | ||
|
|
122fa744d5 | ||
|
|
5337e59f6d | ||
|
|
b001731f00 | ||
|
|
4da2ede624 | ||
|
|
07c4911f84 | ||
|
|
aa6427ff64 | ||
|
|
82705ef806 | ||
|
|
2756f73ebc | ||
|
|
757cd00481 | ||
|
|
de66b12cd9 | ||
|
|
0e5914386e | ||
|
|
7212301ce7 | ||
|
|
255e901985 | ||
|
|
d458952a9d | ||
|
|
345f6580e1 | ||
|
|
d0d60eeb64 | ||
|
|
f63a557c2b | ||
|
|
9be3944503 | ||
|
|
3ef0596bab | ||
|
|
80c318a7b8 | ||
|
|
2f0457a7c7 | ||
|
|
fe23970464 | ||
|
|
44c289f8bd | ||
|
|
5c0d48ae05 | ||
|
|
817970dcb5 | ||
|
|
8364f69150 | ||
|
|
3463ededcb | ||
|
|
810f729bc4 | ||
|
|
ad28751bf5 | ||
|
|
53da4a5eaa | ||
|
|
fe7ebb5f22 | ||
|
|
303e3d793f | ||
|
|
1c0b862082 | ||
|
|
7245a82ae8 | ||
|
|
1796238bf1 | ||
|
|
02e72a8d3f | ||
|
|
2923bbed79 | ||
|
|
f647347235 | ||
|
|
af75ac398f | ||
|
|
4efb41a3b0 | ||
|
|
0679df49ff | ||
|
|
269f505f75 | ||
|
|
51bbad9fc4 | ||
|
|
087564e8ec | ||
|
|
930fe4517c | ||
|
|
d9ef99b30b | ||
|
|
e7c2e9947f | ||
|
|
43a6103285 | ||
|
|
de0f115f92 | ||
|
|
5fef6a21f2 | ||
|
|
ee2c2ea0a8 | ||
|
|
f9c7b7bef9 | ||
|
|
71aaa90a3d | ||
|
|
4f6d2611e3 | ||
|
|
e2b6ff47cf | ||
|
|
8b6d5049a0 | ||
|
|
3b04a93363 | ||
|
|
7f1a8f768b | ||
|
|
ae6cf6aa74 | ||
|
|
44174f1b54 | ||
|
|
80ab5c71d6 | ||
|
|
3accaae0ab | ||
|
|
205ed61c30 | ||
|
|
dd1e92f200 | ||
|
|
dfdeeaa7b2 | ||
|
|
c688f07e4a | ||
|
|
3b960d7af8 | ||
|
|
d060880a08 | ||
|
|
a89df6e6f9 | ||
|
|
d0982e40b8 | ||
|
|
b2b5d7336b | ||
|
|
52eded0c35 | ||
|
|
cf19adec2e | ||
|
|
68229bef38 | ||
|
|
bf9471809e | ||
|
|
696477864b | ||
|
|
d75d771fe0 | ||
|
|
528b2e1c75 | ||
|
|
d64e3a6629 | ||
|
|
406d003fd0 | ||
|
|
c2f3407727 | ||
|
|
cbbf115241 | ||
|
|
2e8275d6eb | ||
|
|
a2ae482a2e | ||
|
|
5307db2a3c | ||
|
|
c313b27b27 | ||
|
|
0ca9a07dd0 | ||
|
|
a15be6c1a8 | ||
|
|
54225bb401 | ||
|
|
0177b178c4 | ||
|
|
b1d6036c10 | ||
|
|
1d347f4f1b | ||
|
|
917f1a2c4b | ||
|
|
ef822e749d | ||
|
|
3b533d4ef9 | ||
|
|
6dd193c917 | ||
|
|
01262a8e2a | ||
|
|
191e30201c | ||
|
|
f537c5eec0 | ||
|
|
86b722c2dd | ||
|
|
2390909e05 | ||
|
|
24e5ae385e | ||
|
|
61b480840b | ||
|
|
7c45ada269 | ||
|
|
0ac020cac2 | ||
|
|
8edaba230d | ||
|
|
cc0685e84e | ||
|
|
d0b02f327d | ||
|
|
4651e0d179 | ||
|
|
42a3b8f9f7 | ||
|
|
79aeaf0312 | ||
|
|
cc10d13b8d | ||
|
|
7b931df995 | ||
|
|
cae5dac4e9 | ||
|
|
909a5fab6c | ||
|
|
6abff904d4 | ||
|
|
89764d0a7d | ||
|
|
deba2b7113 | ||
|
|
786adb9f6d | ||
|
|
f34284ac6f | ||
|
|
5ecd90fc71 | ||
|
|
8e58d7fd4e | ||
|
|
11f9289298 | ||
|
|
479fab0812 | ||
|
|
d73034b348 | ||
|
|
092243fa99 | ||
|
|
b7f7ffb3f0 | ||
|
|
b12201bb8e | ||
|
|
436367da6d | ||
|
|
99405ea94e | ||
|
|
ad0a4ee6a9 | ||
|
|
189cafba5f | ||
|
|
d5821e18ef | ||
|
|
3d4de0722a | ||
|
|
21c75241f3 | ||
|
|
e4e2304a7a | ||
|
|
a933ac584c | ||
|
|
f90b913f09 | ||
|
|
a7d362b765 | ||
|
|
baf7394b18 | ||
|
|
2e87f8045f | ||
|
|
739bfbd1e8 | ||
|
|
effafdbf2c | ||
|
|
9afeaf6d85 | ||
|
|
a751b71eef | ||
|
|
4963f7710b | ||
|
|
4618b54437 | ||
|
|
925e1d256f | ||
|
|
0c2a49af89 | ||
|
|
cba66f029b | ||
|
|
e00ce1a786 | ||
|
|
9a18a3af63 | ||
|
|
d22f57a707 | ||
|
|
c531a3f2a2 | ||
|
|
eebd0556c3 | ||
|
|
22a32211bf | ||
|
|
2bda982a21 | ||
|
|
ebdd89bc29 | ||
|
|
1ed7dfd34a | ||
|
|
d33fdfd4ba | ||
|
|
57077045c3 | ||
|
|
316d4cff28 | ||
|
|
c24ecfa5df | ||
|
|
ae496f2c4a | ||
|
|
01974685f8 | ||
|
|
0d2ac65c27 | ||
|
|
233811a9be | ||
|
|
1274343416 | ||
|
|
10cd0f26f2 | ||
|
|
5f0e3732d7 | ||
|
|
71989f43f4 | ||
|
|
f8969e3111 | ||
|
|
cc4dd31051 | ||
|
|
e55b18e37f | ||
|
|
6142c31006 | ||
|
|
6699e56b96 | ||
|
|
55fd3b7812 | ||
|
|
ff7e3a3fc0 | ||
|
|
9dad4bc4d6 | ||
|
|
1dc74269f3 | ||
|
|
3f32c572cd | ||
|
|
9333a976a1 | ||
|
|
e7711d4abc | ||
|
|
d7dd4a9b36 | ||
|
|
2432914631 | ||
|
|
72e8b2d0f9 | ||
|
|
1ccd9654eb | ||
|
|
c4f2d9c609 | ||
|
|
06102f36be | ||
|
|
9576f85a5e | ||
|
|
9eb70696cb | ||
|
|
a9642cad6b | ||
|
|
0b76972c42 | ||
|
|
f486591a87 | ||
|
|
8bbee5aba8 | ||
|
|
2f5dc56cb0 | ||
|
|
21349d883c | ||
|
|
a90d45fc6f | ||
|
|
3980b6b451 | ||
|
|
b88bde2b45 | ||
|
|
f31b457bcd | ||
|
|
51a66e5fdb | ||
|
|
00cbf2acbf | ||
|
|
5ecb4e0f0e | ||
|
|
99328cae74 | ||
|
|
9460525bf3 | ||
|
|
ffb2686d43 | ||
|
|
b583d6ecd5 | ||
|
|
d8a038921f | ||
|
|
6aa03a43ba | ||
|
|
5d9f3ae2a9 | ||
|
|
0f3f066eb9 | ||
|
|
71d909827d | ||
|
|
03810f131e | ||
|
|
3b9413274d | ||
|
|
22548dd982 | ||
|
|
9a115483e9 | ||
|
|
6cb3683875 | ||
|
|
04e0013780 | ||
|
|
7398200cfd | ||
|
|
dc17c8350c | ||
|
|
4c05cc7259 | ||
|
|
66b4c424e9 | ||
|
|
dbc28cb451 | ||
|
|
c959888801 | ||
|
|
2edc2d5d8c | ||
|
|
e227f735cf | ||
|
|
2e80dd5421 | ||
|
|
4932f3b6b1 | ||
|
|
865dc1c543 | ||
|
|
4e865f5d64 | ||
|
|
0f3098d169 | ||
|
|
eae0e207f8 | ||
|
|
a41fc47544 | ||
|
|
7093117fb7 | ||
|
|
0dc7889b4d | ||
|
|
54edffe7c8 | ||
|
|
46132a087e | ||
|
|
67f26bfa0e | ||
|
|
7be79532db | ||
|
|
e13d163edc | ||
|
|
8f778ad067 | ||
|
|
b90e2949e7 | ||
|
|
adf2230d4a | ||
|
|
9d361966e3 | ||
|
|
bee564fd76 | ||
|
|
56431a2b63 | ||
|
|
e923cd2e0e | ||
|
|
dc81375150 | ||
|
|
caebd98825 | ||
|
|
816c97ad46 | ||
|
|
b205db96ad | ||
|
|
0fc1950400 | ||
|
|
8c974aafc7 | ||
|
|
0c798d691d | ||
|
|
a30031a3b3 | ||
|
|
ee3b45b38e | ||
|
|
fb490ec1e0 | ||
|
|
4e78da1b6d | ||
|
|
fda847f7f8 | ||
|
|
e641856118 | ||
|
|
a4797c5bc8 | ||
|
|
23955e65c0 | ||
|
|
dfb1572ffe | ||
|
|
b6d98dc02d | ||
|
|
d91df6837c | ||
|
|
c3bf4eac2d | ||
|
|
aa4b2d2f6b | ||
|
|
24e75e73b2 | ||
|
|
081c77c5e8 | ||
|
|
d108142470 | ||
|
|
09a1e5f755 | ||
|
|
1ba419de43 | ||
|
|
2835da67e5 | ||
|
|
d8d2bb7111 | ||
|
|
03eb2a0e9f | ||
|
|
e5b6c0a790 | ||
|
|
09631d585f | ||
|
|
2307a53dad | ||
|
|
635f1f31dc | ||
|
|
85f72f6592 | ||
|
|
5edaa9ae4e | ||
|
|
a6f5cb2061 | ||
|
|
f839815855 | ||
|
|
e1d3ad0cce | ||
|
|
462676144b | ||
|
|
aaeeb2b32b | ||
|
|
81bc172569 | ||
|
|
080ae3187d | ||
|
|
d2af1a86da | ||
|
|
2e6460c7ab | ||
|
|
8c725bcf7f | ||
|
|
fddc03c37f | ||
|
|
9df5b4d83f | ||
|
|
9b240699ae | ||
|
|
413d25791b | ||
|
|
b444b07d68 | ||
|
|
c20c73bb96 | ||
|
|
7ee9c45326 | ||
|
|
6f183b83d9 | ||
|
|
75ab1763bf | ||
|
|
a240610772 | ||
|
|
0b4eadc263 | ||
|
|
1e81d4d1fe | ||
|
|
d751c27253 | ||
|
|
833724a845 | ||
|
|
cbc633eaf5 | ||
|
|
35822d615f | ||
|
|
3cd606ff24 | ||
|
|
934dd32568 | ||
|
|
9426ebdf6f | ||
|
|
93d476198d | ||
|
|
70ee68dc12 | ||
|
|
1c1004e0ee | ||
|
|
3464671874 | ||
|
|
247aff2676 | ||
|
|
693e7babca | ||
|
|
ab739925d0 | ||
|
|
b9446d283b | ||
|
|
e0961a1a4c | ||
|
|
3674033c42 | ||
|
|
d394f8a620 | ||
|
|
87c6fe6d28 | ||
|
|
0dd336a451 | ||
|
|
dd24470d75 | ||
|
|
7be983c1e3 | ||
|
|
130d0fc1df | ||
|
|
24d0896071 | ||
|
|
c692f6f393 | ||
|
|
d4624143f4 | ||
|
|
45036868df | ||
|
|
7f1180be38 | ||
|
|
857bc7224c | ||
|
|
68537c924c | ||
|
|
9ce3328695 | ||
|
|
2a2e1b4f05 | ||
|
|
c200756247 | ||
|
|
c5009afced | ||
|
|
c9c9719d04 | ||
|
|
1637fa7870 | ||
|
|
11ae49e28b | ||
|
|
e345b085ab | ||
|
|
afd947fdba | ||
|
|
a5b25f0dba | ||
|
|
072f63608b | ||
|
|
24c14e2a75 | ||
|
|
1ecc024543 | ||
|
|
68fbdc8469 | ||
|
|
0561948cf8 | ||
|
|
dd8cc50163 | ||
|
|
df49e689a8 | ||
|
|
acb4243a3a | ||
|
|
08da626c15 | ||
|
|
0aefe60360 | ||
|
|
08fa4d48a4 | ||
|
|
78647e27e6 | ||
|
|
672fe67c48 | ||
|
|
c8542c7d7c | ||
|
|
6bc4b7199c | ||
|
|
20a1499dde | ||
|
|
42a51c0810 | ||
|
|
da68ed881f | ||
|
|
4c473d5f9a | ||
|
|
5340c588cb | ||
|
|
614a54e204 | ||
|
|
959e78cfe3 | ||
|
|
3bd7f4e000 | ||
|
|
d55115885e | ||
|
|
265a661cc8 | ||
|
|
2adb98c8f5 | ||
|
|
69775d106b | ||
|
|
54459e0042 | ||
|
|
be14537f85 | ||
|
|
dcf6b47f58 | ||
|
|
ab1f3f6312 | ||
|
|
a04b06923e | ||
|
|
fd5c33d528 | ||
|
|
b6a12cdbf3 | ||
|
|
c03b08562d | ||
|
|
939b8fa225 | ||
|
|
c905f14f7a | ||
|
|
3221ed9f9b | ||
|
|
d979499d5b | ||
|
|
feac914b27 | ||
|
|
3a0db373ed | ||
|
|
c86dbd6ef9 | ||
|
|
e586e0a92e | ||
|
|
bab1fcadc2 | ||
|
|
cb67854213 | ||
|
|
71bc265772 | ||
|
|
1043a62d87 | ||
|
|
899c8bd754 | ||
|
|
be7c3f492a | ||
|
|
e0b3348d2e | ||
|
|
fd60565435 | ||
|
|
1f46fd2737 | ||
|
|
1058007e7d | ||
|
|
35e8e04417 | ||
|
|
4c98763120 | ||
|
|
4694b7cd33 | ||
|
|
925d2d2dd8 | ||
|
|
c6ec08068c | ||
|
|
2b760459fc | ||
|
|
8ec68ac58e | ||
|
|
ca96e110be | ||
|
|
8b14031051 | ||
|
|
ead0fefd01 | ||
|
|
aa19c3ec09 | ||
|
|
40531e074c | ||
|
|
0201d47113 | ||
|
|
165cc0250d | ||
|
|
c80a397d40 | ||
|
|
5fe5d445c3 | ||
|
|
4e0c0f5436 | ||
|
|
cc07868d43 | ||
|
|
4e9934b928 | ||
|
|
f60d449c69 | ||
|
|
7ae608f7cc | ||
|
|
89f7e48ff4 | ||
|
|
a8fe4b4a98 | ||
|
|
258dc1c324 | ||
|
|
11101190ad | ||
|
|
f50e460e33 | ||
|
|
0aba0a833a | ||
|
|
5fe0450a17 | ||
|
|
2af6d39c4d | ||
|
|
a8d0f11cb1 | ||
|
|
98ef22649e | ||
|
|
23b23cf16d | ||
|
|
fc8c99535f | ||
|
|
2a31dc5ef1 | ||
|
|
8c5f47a606 | ||
|
|
ac0d36674b | ||
|
|
d41476c0dc | ||
|
|
a5159714f9 | ||
|
|
ab687f9786 | ||
|
|
89b0e349c3 | ||
|
|
b270c10c9e | ||
|
|
d2b4869261 | ||
|
|
443813fa78 | ||
|
|
d74652ad77 | ||
|
|
3351f6f310 | ||
|
|
44b682e76c | ||
|
|
f77a07f23a | ||
|
|
34a6cf6bf3 | ||
|
|
ba91e609f9 | ||
|
|
8ed2b17098 | ||
|
|
b291568527 | ||
|
|
7c468bb984 | ||
|
|
44bb8fc91c | ||
|
|
d126eab38e | ||
|
|
6bc96e9845 | ||
|
|
4a7fe5c784 | ||
|
|
0ef0ba36fb | ||
|
|
94b82a6632 | ||
|
|
0c19c15d38 | ||
|
|
92d83adabf | ||
|
|
56533b5ff6 | ||
|
|
57facf1ff9 | ||
|
|
a06a98d6b4 | ||
|
|
02c05cf855 | ||
|
|
0afd723690 | ||
|
|
cfd77783cb | ||
|
|
7c3de03d22 | ||
|
|
6ebae236dc | ||
|
|
9fbd01524e | ||
|
|
6beebe2d1b | ||
|
|
ae3fa3eb4b | ||
|
|
491ed144c8 | ||
|
|
37ed34b2ae | ||
|
|
5ffe16db27 | ||
|
|
f638e3d801 | ||
|
|
bdf77bb7b3 | ||
|
|
a6bb76628c | ||
|
|
12e13d5d1c | ||
|
|
d202bfdf33 | ||
|
|
e31121e729 | ||
|
|
213f859d6a | ||
|
|
e183b0a59a | ||
|
|
4c6d045b26 | ||
|
|
fac5778c3c | ||
|
|
259cbff6cf | ||
|
|
26a29c0bbf | ||
|
|
fcec323f10 | ||
|
|
e1c3380307 | ||
|
|
1bbc127a12 | ||
|
|
8238eb7a2e | ||
|
|
ea3d91f9a6 | ||
|
|
8125518e5a | ||
|
|
1cccfdbc0a | ||
|
|
fec75fb30c | ||
|
|
9e59127c4b | ||
|
|
b6f9abb42e | ||
|
|
2a3b7691df | ||
|
|
88cf547a18 | ||
|
|
7c80b14778 | ||
|
|
efd2a66d4d | ||
|
|
17733ae97e | ||
|
|
6292d17272 | ||
|
|
bbdb0df3fe | ||
|
|
49d75fce7b | ||
|
|
d254e8f77a | ||
|
|
b7497fd26b | ||
|
|
60cc55f149 | ||
|
|
10dac50943 | ||
|
|
780d4da30a | ||
|
|
941fa45804 | ||
|
|
b4c3006e53 | ||
|
|
889dfc91c5 | ||
|
|
317557903a | ||
|
|
e2046ebd3e | ||
|
|
c0956eaa87 | ||
|
|
b07844afa6 | ||
|
|
213f6a6fec | ||
|
|
dd3b9851d8 | ||
|
|
d2884c0f44 | ||
|
|
bdb188813a | ||
|
|
2e37cae1db | ||
|
|
492c648c9a | ||
|
|
fb8f99f4bb | ||
|
|
0fc39accca | ||
|
|
ac7304623f | ||
|
|
e5445e35c5 | ||
|
|
76b7dfba73 | ||
|
|
c889dfc383 | ||
|
|
a8db744cf1 | ||
|
|
a38b843d41 | ||
|
|
75029123a1 | ||
|
|
b7f9058092 | ||
|
|
48bc1f8a2f | ||
|
|
fc020f6856 | ||
|
|
e350c34922 | ||
|
|
4d4e7fc261 | ||
|
|
2d52e33eec | ||
|
|
607e720a71 | ||
|
|
08a930567a | ||
|
|
8fee70efc6 | ||
|
|
7f9608e465 | ||
|
|
1048ca4d31 | ||
|
|
89ec968bc5 | ||
|
|
d367e0a603 | ||
|
|
2f2f14523f | ||
|
|
80328cc998 | ||
|
|
80422ba730 | ||
|
|
e1730fdb79 | ||
|
|
b1a83cda4b | ||
|
|
5c2b8b4da3 | ||
|
|
bc5a29d3ed | ||
|
|
60b6ccbca3 | ||
|
|
6702aac0c2 | ||
|
|
31673889df | ||
|
|
fa7d176469 | ||
|
|
4b2e83ca2e | ||
|
|
7ec8930cbe | ||
|
|
03de480af2 | ||
|
|
db817b7609 | ||
|
|
7f6a7fba58 | ||
|
|
6c6c8f93a3 | ||
|
|
a4e0c89fb7 | ||
|
|
9d8c1802f6 | ||
|
|
af11368c30 | ||
|
|
1399ee9866 | ||
|
|
1f04cbe947 | ||
|
|
076ac4cec7 | ||
|
|
9f4356027e | ||
|
|
1717cd7e54 | ||
|
|
45118f9ff7 | ||
|
|
d07066dd60 | ||
|
|
539e67d14f | ||
|
|
0d7395d4c0 | ||
|
|
39a6561c78 | ||
|
|
62511d2f0d | ||
|
|
c52d0c8d1f | ||
|
|
7e852ab067 | ||
|
|
aa438b4c8b | ||
|
|
1e07b9e029 | ||
|
|
5959e1a297 | ||
|
|
affcbd2957 | ||
|
|
387b77e0e3 | ||
|
|
ad9cbcaad2 | ||
|
|
880fa0788b | ||
|
|
7c9562db82 | ||
|
|
3782c165f8 | ||
|
|
94d78de927 | ||
|
|
244ae736d1 | ||
|
|
53bf691fea | ||
|
|
a332955926 | ||
|
|
8e87409b64 | ||
|
|
a0a30bd8aa | ||
|
|
bf5005fb35 | ||
|
|
34072cded2 | ||
|
|
d4211f957c | ||
|
|
d91d9ab35f | ||
|
|
2dfc726c38 | ||
|
|
67186e6422 | ||
|
|
f57ee81e99 | ||
|
|
6d447236b2 | ||
|
|
c03fd40dbf | ||
|
|
71ab5a1507 | ||
|
|
806c1654d5 | ||
|
|
09fb06a0f8 | ||
|
|
7d282c606e | ||
|
|
03302643db | ||
|
|
2dfc875b76 | ||
|
|
2cd1728186 | ||
|
|
065c28854f | ||
|
|
020b41abaa | ||
|
|
040e132d2e | ||
|
|
5986af1690 | ||
|
|
77f5915915 | ||
|
|
6493c127fa | ||
|
|
091125ed08 | ||
|
|
04435e6374 | ||
|
|
f6299dd8cd | ||
|
|
593f9eff79 | ||
|
|
d8b61a92bd | ||
|
|
b010155f31 | ||
|
|
cebf2ca8f5 | ||
|
|
cbb3ca7363 | ||
|
|
333ecaeb07 | ||
|
|
7dde9ab0bb | ||
|
|
88767b8aa3 | ||
|
|
30d51a5f6a | ||
|
|
b876679619 | ||
|
|
e151f0977e | ||
|
|
23637972b1 | ||
|
|
5c5274bbe4 | ||
|
|
5a172d30f1 | ||
|
|
836e421363 | ||
|
|
c22451aa05 | ||
|
|
3376debc78 | ||
|
|
a9deda4f31 | ||
|
|
da7e44f55d | ||
|
|
8dc2e0a160 | ||
|
|
a0a4ff9655 | ||
|
|
4db1db9bcf | ||
|
|
179beb2a4c | ||
|
|
49194f893b | ||
|
|
c11096373e | ||
|
|
0b56ffbc24 | ||
|
|
2a5940d420 | ||
|
|
c6f191790c | ||
|
|
c3737aaf2e | ||
|
|
14c1ee66c6 | ||
|
|
e97ea67535 | ||
|
|
5774df2e62 | ||
|
|
032f024e12 | ||
|
|
9b3b1c3cc6 | ||
|
|
6aa0aeeee4 | ||
|
|
83267c2744 | ||
|
|
1e37aae43e | ||
|
|
3c83fdaccd | ||
|
|
af0e425306 | ||
|
|
a80cf0f502 | ||
|
|
1ca3a5cb09 | ||
|
|
8ab35ec423 | ||
|
|
2808024594 | ||
|
|
f43cf32516 | ||
|
|
81c26556b1 | ||
|
|
bf4338590a | ||
|
|
b1f74c929b | ||
|
|
0b59c879a9 | ||
|
|
a1f0756396 | ||
|
|
47b9f5a4c4 | ||
|
|
f79192b231 | ||
|
|
437c47868b | ||
|
|
170af495f2 | ||
|
|
4294bd5bf5 | ||
|
|
406cde65db | ||
|
|
aebe27dbf9 | ||
|
|
d44fc5cf70 | ||
|
|
5aeced673a | ||
|
|
f6b9c69382 | ||
|
|
1c6eaf2c29 | ||
|
|
7e48899b61 | ||
|
|
26690d1e69 | ||
|
|
95a2bc5d04 | ||
|
|
24aad20869 | ||
|
|
468ee8bffe | ||
|
|
f4cd4c7375 | ||
|
|
7f34e64efb | ||
|
|
037bf6525d | ||
|
|
5621d64891 | ||
|
|
762a92f156 | ||
|
|
2beb9bace0 | ||
|
|
3aa9871440 | ||
|
|
aa490aefe2 | ||
|
|
da215ffb9d | ||
|
|
a7458f1597 | ||
|
|
0d931a0bd7 | ||
|
|
15944245f8 | ||
|
|
deba02be6c | ||
|
|
c58807d8bb | ||
|
|
0d085e66c9 | ||
|
|
bdd4f4c908 | ||
|
|
d0896aa911 | ||
|
|
57f18e2913 | ||
|
|
b1c5725a78 | ||
|
|
e791ed4ba5 | ||
|
|
b7a1475217 | ||
|
|
a9a1043e5d | ||
|
|
be045d15ee | ||
|
|
4b7da3530f | ||
|
|
b78fe84421 | ||
|
|
56484c6b34 | ||
|
|
aa36403572 | ||
|
|
61b09cd93f | ||
|
|
e604f9de38 | ||
|
|
e586399abb | ||
|
|
fb88a5379a | ||
|
|
ae65547498 | ||
|
|
267d51b8ce | ||
|
|
8a0d2cbd61 | ||
|
|
6ebbf3b901 | ||
|
|
c8ea6adc94 | ||
|
|
135c0139fa | ||
|
|
0750684ec4 | ||
|
|
c7c73224da | ||
|
|
6d5c7887ae | ||
|
|
ad98de19e8 | ||
|
|
e80002e7fd | ||
|
|
28474b0249 | ||
|
|
c79b1f9705 | ||
|
|
b14673e4eb | ||
|
|
f9af12c05c | ||
|
|
59574159b0 | ||
|
|
95e7685d4b | ||
|
|
844e1dc8a1 | ||
|
|
d98a0f2890 | ||
|
|
3b8a20fbd7 | ||
|
|
76130a91f6 | ||
|
|
0a9114bc49 | ||
|
|
5edd305d37 | ||
|
|
c2c20da250 | ||
|
|
b76fef912c | ||
|
|
155e6e0bf2 | ||
|
|
cdac6dacc9 | ||
|
|
07972a671e | ||
|
|
d409394bdf | ||
|
|
043b2fe7e5 | ||
|
|
8b47a22bdc | ||
|
|
0f484dc85e | ||
|
|
c493d45517 | ||
|
|
c084d7a2c8 | ||
|
|
25ac3338ec | ||
|
|
f35fd13600 | ||
|
|
1b7e09ca77 | ||
|
|
12290267eb | ||
|
|
ebbecf4b84 | ||
|
|
58359146cf | ||
|
|
9a9fea87b1 | ||
|
|
b29695acc5 | ||
|
|
8a79508fae | ||
|
|
f391f7928d | ||
|
|
9ea939469f | ||
|
|
cbb45094a5 | ||
|
|
e9863f8ffd | ||
|
|
4dc1d6b91d | ||
|
|
b5a47566d6 | ||
|
|
5feb40590b | ||
|
|
6966036c99 | ||
|
|
1b997ae1f9 | ||
|
|
09a071e124 | ||
|
|
614fa385db | ||
|
|
9c298a80e2 | ||
|
|
b9c0e38fae | ||
|
|
383cba7354 | ||
|
|
aeda561e1b | ||
|
|
e330a6e1ae | ||
|
|
302855dfbd | ||
|
|
fcb26a2dab | ||
|
|
f11cc86e8d | ||
|
|
6be539d148 | ||
|
|
595541fb2a | ||
|
|
ed06db5643 | ||
|
|
feb4b0f232 | ||
|
|
1a16d8a69d | ||
|
|
f4bb92fe8c | ||
|
|
0ed9913358 | ||
|
|
320eb3fe20 | ||
|
|
857d0a9834 | ||
|
|
a273cca87d | ||
|
|
80a2cb8356 | ||
|
|
b8fedbb512 | ||
|
|
4107dbd5ae | ||
|
|
fbe8c16f6c | ||
|
|
8a5357e565 | ||
|
|
694ff78a8f | ||
|
|
ccb68c8691 | ||
|
|
147f22b1d5 | ||
|
|
b382c5a4b2 | ||
|
|
6e5fc39f94 | ||
|
|
3eaf36d3e4 | ||
|
|
cff9f91172 | ||
|
|
156049b854 | ||
|
|
ccff430c1b | ||
|
|
699c8384c0 | ||
|
|
e05c2f4f02 | ||
|
|
3f82d1d595 | ||
|
|
faee259868 | ||
|
|
faf083a084 | ||
|
|
6819a06d21 | ||
|
|
4de98507f2 | ||
|
|
932abb840f | ||
|
|
cc6168afd2 | ||
|
|
12d797b01d | ||
|
|
b7bac17db8 | ||
|
|
1cefdb9a24 | ||
|
|
6977aa090a | ||
|
|
b5d3a4c53d | ||
|
|
0d8b72f494 | ||
|
|
c91f8193ae | ||
|
|
22bedee8ea | ||
|
|
1950aabbd3 | ||
|
|
4249c242ae | ||
|
|
bf968ee96c | ||
|
|
95fbfba6f5 | ||
|
|
b0e162e820 | ||
|
|
96ad022091 | ||
|
|
14a1353684 | ||
|
|
e30498458e | ||
|
|
525db3e566 | ||
|
|
d0deef5085 | ||
|
|
2d8c0d8d46 | ||
|
|
fe346acb80 | ||
|
|
a86c6819df | ||
|
|
47790ceee6 | ||
|
|
dc6e06647f | ||
|
|
1c0c7b152a | ||
|
|
90f03a62f8 | ||
|
|
69f6e3e11d | ||
|
|
179c02dfb0 | ||
|
|
582a5daf7a | ||
|
|
c692322053 | ||
|
|
07d58522cd | ||
|
|
4e1275d014 | ||
|
|
ae8351effa | ||
|
|
a3426ba78c | ||
|
|
5f41f6f3fd | ||
|
|
4a869d5793 | ||
|
|
f676ab0fb4 | ||
|
|
26cbb07ce4 | ||
|
|
2e09c985fc | ||
|
|
a0d0c9e855 | ||
|
|
e4493ea30e | ||
|
|
b0daad0e7d | ||
|
|
660a96be70 | ||
|
|
4c06d1139d | ||
|
|
b6ce889ed5 | ||
|
|
e7cacde27a | ||
|
|
9578b2d8a7 | ||
|
|
eb390b3c44 | ||
|
|
61150c5103 | ||
|
|
d88081d2ec | ||
|
|
590165d9d8 | ||
|
|
0cc1e53857 | ||
|
|
53cc111e74 | ||
|
|
67b3c3ab48 | ||
|
|
691ab27283 | ||
|
|
f071976811 | ||
|
|
cf1fe176da | ||
|
|
3e582c07eb | ||
|
|
52cd18e33b | ||
|
|
5699e65b9e | ||
|
|
07085766c4 | ||
|
|
04e15d8ed0 |
@@ -1,6 +0,0 @@
|
||||
[Dolphin]
|
||||
Timestamp=2019,4,23,18,58,8
|
||||
Version=4
|
||||
|
||||
[Settings]
|
||||
HiddenFilesShown=true
|
||||
34
.gitea/workflows/updateDomains.yaml
Normal file
34
.gitea/workflows/updateDomains.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
name: Update channel domains
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '30 17 * * *'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
update:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: '3.7'
|
||||
architecture: 'x64'
|
||||
|
||||
- name: Install dependencies
|
||||
run: pip install requests
|
||||
|
||||
- name: Update domains
|
||||
run: python tools/updateDomains.py
|
||||
|
||||
- name: Commit & Push changes
|
||||
uses: actions-js/push@master
|
||||
with:
|
||||
message: "Aggiornamento domini"
|
||||
branch: "master"
|
||||
github_token: ${{ secrets.API_TOKEN_GITHUB }}
|
||||
21
.github/ISSUE_TEMPLATE/canale-non-funzionante.md
vendored
Executable file
21
.github/ISSUE_TEMPLATE/canale-non-funzionante.md
vendored
Executable file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: Segnala Problemi ad un Canale
|
||||
about: Invio segnalazione per un canale non funzionante
|
||||
title: 'Inserisci il nome del canale'
|
||||
labels: Problema Canale
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Per poter scrivere o allegare file nella pagina devi:**
|
||||
- cliccare sui [ ... ] in alto a destra della scheda
|
||||
- Edit. Da questo momento puoi scrivere e/o inviare file.
|
||||
|
||||
|
||||
Inserisci il nome del canale
|
||||
|
||||
|
||||
- Indica il tipo di problema riscontrato, sii il più esauriente possibile. Che azione ha portato all'errore (Es. non riesco ad aggiungere film nella videoteca, ne dal menu contestuale, ne dalla voce in fondo alla lista dei server)
|
||||
|
||||
- Ottieni il log seguendo le istruzioni: https://telegra.ph/LOG-11-20 e invialo qui.
|
||||
|
||||
19
.github/ISSUE_TEMPLATE/server-non-funzionante.md
vendored
Executable file
19
.github/ISSUE_TEMPLATE/server-non-funzionante.md
vendored
Executable file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
name: Segnala Problemi ad un Server
|
||||
about: Invio segnalazione per un server non funzionante
|
||||
title: 'Inserisci il nome del server'
|
||||
labels: Problema Server
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Per poter scrivere o allegare file nella pagina devi:**
|
||||
- cliccare sui [ ... ] in alto a destra della scheda
|
||||
- Edit. Da questo momento puoi scrivere e/o inviare file.
|
||||
|
||||
|
||||
Inserisci il nome del server che indica problemi e se il problema è circoscritto ad un solo canale, indicalo
|
||||
|
||||
|
||||
- Allega il file di log nella sua completezza. Non cancellarne delle parti.
|
||||
|
||||
286
.github/ISSUE_TEMPLATE/test-canale.md
vendored
Executable file
286
.github/ISSUE_TEMPLATE/test-canale.md
vendored
Executable file
@@ -0,0 +1,286 @@
|
||||
---
|
||||
name: Test Canale
|
||||
about: Pagina per il test di un canale
|
||||
title: ''
|
||||
labels: Test Canale
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
Documento Template per il Test del canale
|
||||
|
||||
Specifica, dove possibile, il tipo di problema che incontri, anche se non è presente alcuna voce per indicarlo.
|
||||
Se hai **suggerimenti/consigli/dubbi sul test**...Proponili e/o Chiedi! Scrivendo un commento a questo stesso issue, che trovi in fondo, dopo questa pagina.
|
||||
|
||||
**Avvertenze:**
|
||||
|
||||
Per il test dei canali **DEVI**:
|
||||
- utilizzare la versione **[BETA](https://kodiondemand.github.io/repo/KoD-installer-BETA.zip)** di KOD!
|
||||
- **ABILITARE IL DEBUG PER I LOG**
|
||||
|
||||
**Per eseguire il test, ricordati di titolarlo con il nome del canale da te scelto, e salvare la pagina cliccando sul bottone verde in basso "SUBMIT NEW ISSUE"**
|
||||
|
||||
**Ogni volta che hai un ERRORE con avviso di LOG. Puoi scegliere se:
|
||||
ALLEGARE IMMEDIATAMENTE il file kodi.log nel punto, della pagina, in cui sei nel test
|
||||
Allegare il file kodi.log a fine pagina.**
|
||||
|
||||
**Per poter scrivere o allegare file nella pagina devi:**
|
||||
- cliccare sui [ ... ] in alto a destra della scheda
|
||||
- Edit. Da questo momento puoi scrivere e/o inviare file.
|
||||
Dopodiché clicca sul bottone verde "Update comment" per continuare il test nel modo consueto o per terminarlo!
|
||||
|
||||
Se hai problemi non previsti dal test, segnalali aggiungendoli in fondo al test.
|
||||
|
||||
**SE VEDI I QUADRATINI MA NON RIESCI A CLICCARLI... DEVI CLICCARE SUL BOTTONE VERDE "SUBMIT NEW ISSUE"**
|
||||
|
||||
***
|
||||
I file relativi al canale li trovi:
|
||||
- su browser:
|
||||
[Apre la pagina dei Canali](https://github.com/kodiondemand/addon/tree/master/channels)
|
||||
- sul device:
|
||||
[nella specifica cartella](https://github.com/kodiondemand/addon/wiki/Percorsi-sui-diversi-S.O.) , .kodi/addons/channels.
|
||||
Per aprirli non servono programmi particolari un semplice editor di testo è sufficiente.
|
||||
|
||||
**Test N.1**: Controllo del file .json
|
||||
|
||||
Occorrente: file .json
|
||||
|
||||
**1. Indica la coerenza delle voci presenti in "language" con i contenuti presenti sul sito:**
|
||||
valori: ita, sub-ita (sub-ita)
|
||||
|
||||
- [ ] coerenti
|
||||
- [ ] non coerenti
|
||||
|
||||
Se non sono coerenti il test è FALLITO, continua comunque a revisionare il resto
|
||||
|
||||
**2. Icone del canale**
|
||||
Controlla sia presente qualcosa, tra le " " di thumbnail e banner, e che le immagini appaiano su KoD
|
||||
|
||||
**in thumbnail:**
|
||||
- [ ] Presente
|
||||
- [ ] Assente
|
||||
|
||||
**in banner:**
|
||||
- [ ] Presente
|
||||
- [ ] Assente
|
||||
|
||||
**3. Verifica la coerenza delle voci presenti in "categories" con i contenuti presenti sul sito:**
|
||||
|
||||
Riepilogo voci:
|
||||
|
||||
movie, tvshow, anime, documentary, vos, adult
|
||||
|
||||
(se il sito contiene film e serie, devono esserci sia movie che tvshow, se contiene solo film, solo movie)
|
||||
|
||||
- [ ] Corrette
|
||||
- [ ] 1 o più Errata/e
|
||||
- [ ] Assenti - Non sono presenti voci in categories, in questo caso non puoi continuare il test.
|
||||
|
||||
Se le voci sono: Assenti, dopo aver compilato la risposta, salva il test e **NON** proseguire.
|
||||
**TEST FALLITO**
|
||||
|
||||
***
|
||||
|
||||
**Test su KOD.**
|
||||
|
||||
Entra in KOD -> Canali. Nella lista accedi al canale che stai testando.
|
||||
**N.B.**: Il nome del canale è il campo **name** nel file .json.
|
||||
|
||||
**Test N.2: Pagina Canale**
|
||||
|
||||
**1. Cerca o Cerca Film...**
|
||||
Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati.
|
||||
|
||||
- [ ] OK
|
||||
- indica il tipo di problema
|
||||
|
||||
**Sezione FILM (se il sito non ha film elimina questa parte)**
|
||||
|
||||
**TestN.3: Pagina dei Titoli**
|
||||
*Test da effettuare mentre sei dentro un menu del canale (film, serietv, in corso ecc..)*.
|
||||
Voci nel menu contestuale di KOD. Posizionati su di un titolo e controlla se hai le seguenti voci, nel menu contestuale (tasto c o tenendo enter premuto):
|
||||
|
||||
**1. Aggiungi Film in videoteca**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**2. Scarica Film (devi avere il download abilitato)**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**Fine test menu contestuale**
|
||||
|
||||
**Fondo pagina dei titoli**
|
||||
|
||||
**3. Paginazione, controlla ci sia la voce "Successivo" (se non c'è controlla sul sito se è presente)**
|
||||
|
||||
- [ ] Sì
|
||||
- [ ] NO
|
||||
|
||||
**Dentro un titolo
|
||||
|
||||
**4. Entra nella pagina del titolo e verifica ci sia almeno 1 server:**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**5. Eventuali problemi riscontrati**
|
||||
- scrivi qui il problema/i
|
||||
|
||||
**Sezione Serie TV (se il sito non ha serietv elimina questa parte)**
|
||||
|
||||
Test da effettuare mentre sei nella pagina dei titoli.
|
||||
Per ogni titolo verifica ci siano le voci nel menu contestuale.
|
||||
|
||||
**1. Aggiungi Serie in videoteca**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**2. Scarica Stagione (devi avere il download abilitato)**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**3. Scarica Serie (devi avere il download abilitato)**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**4. Cerca o Cerca Serie...**
|
||||
Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati.
|
||||
|
||||
- [ ] Ok
|
||||
- indica il tipo di problema
|
||||
|
||||
**5. Entra nella pagina della serie, verifica che come ultima voce ci sia "Aggiungi in videoteca":**
|
||||
|
||||
- [ ] Si, appare
|
||||
- [ ] Non appare
|
||||
|
||||
**6. Entra nella pagina dell'episodio, **NON** deve apparire la voce "Aggiungi in videoteca":**
|
||||
|
||||
- [ ] Si, appare
|
||||
- [ ] Non appare
|
||||
|
||||
**7. Eventuali problemi riscontrati**
|
||||
- scrivi qui il problema/i
|
||||
|
||||
**Sezione Anime (se il sito non ha anime elimina questa parte)**
|
||||
|
||||
Test da effettuare mentre sei nella pagina dei titoli. Per ogni titolo verifica ci siano le voci nel menu contestuale.
|
||||
|
||||
**1. Rinumerazione (se gli episodi non appaiono nella forma 1x01)**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**2. Aggiungi Serie in videoteca**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**3. Aggiungi 2-3 titoli in videoteca.**
|
||||
- [ ] Aggiunti correttamente
|
||||
- [Indica eventuali problemi] (copia-incolla per tutti i titoli con cui hai avuto il problema)
|
||||
|
||||
- COPIA qui l'ERRORE dal LOG
|
||||
|
||||
**4. Scarica Serie**
|
||||
|
||||
- [ ] Si
|
||||
- [ ] No
|
||||
|
||||
**5. Cerca o Cerca Serie...**
|
||||
Cerca un titolo a caso in KOD e lo stesso titolo sul sito. Confronta i risultati.
|
||||
|
||||
- [ ] Ok
|
||||
- indica il tipo di problema
|
||||
|
||||
**6. Entra nella pagina della serie, verifica che come ultima voce ci sia "Aggiungi in videoteca":**
|
||||
|
||||
- [ ] Appare
|
||||
- [ ] Non appare
|
||||
|
||||
**7. Entra nella pagina dell'episodio, NON ci deve essere la voce "Aggiungi in videoteca":**
|
||||
|
||||
- [ ] Non appare
|
||||
- [ ] Appare
|
||||
|
||||
**8. Eventuali problemi riscontrati**
|
||||
- scrivi qui il problema/i
|
||||
|
||||
** TEST PER IL CONFRONTO TRA SITO E CANALE **
|
||||
|
||||
**TestN.4: Pagina Sito - Menu Canale**
|
||||
|
||||
Occorrente: Browser, KOD! e il file canale.py ( da browser o da file )
|
||||
Avviso:
|
||||
- Sul Browser disattiva eventuali componenti aggiuntivi che bloccano i JS (javascript), li riattivi alla fine del test.
|
||||
|
||||
Entra in ogni menu e controlla che i risultati, delle prime 5-6 pagine, siano gli stessi che trovi sul sito, comprese le varie info (ita/sub-ita, qualità ecc..), inoltre entra, se ci sono, nei menu dei generi - anni - lettera, verifica che cliccando su una voce si visualizzino i titoli.
|
||||
|
||||
*Copia questa sezione per ogni voce che presenta problemi:*
|
||||
|
||||
- [ ] Voce menu ( del canale dove riscontri errori)
|
||||
|
||||
Titoli non corrispondenti:
|
||||
|
||||
- [ ] Il totale dei Titoli è diverso da quello del sito. Alcuni Titoli non compaiono.
|
||||
- [ ] Appaiono titoli per pagine informative o link a siti esterni. Es: Avviso agli utenti.
|
||||
- [ ] La lingua, del titolo, è diversa da quella riportata dal sito
|
||||
- [ ] Non è indicato in 1 o più titoli che sono SUB-ITA
|
||||
- [ ] Cliccando su "Successivo" non si visualizzano titoli
|
||||
- [ ] Non è indicata la qualità: Hd-DVD/rip e altri, nonostante sul sito siano presenti
|
||||
|
||||
- [ ] NO
|
||||
|
||||
|
||||
*Fine Copia*
|
||||
|
||||
|
||||
**Test.N5: Ricerca Globale**
|
||||
|
||||
Per questo test ti consiglio di inserire come UNICO sito quello che stai testando, come canale incluso in: Ricerca Globale -> scegli i canali da includere
|
||||
Il test è già compilato con le spunte, dato che devi copiarlo solo in caso di errori. Togli la spunta dove funziona.
|
||||
Si consiglia di cercare almeno a fino 5 titoli. O perlomeno non fermarti al 1°.
|
||||
|
||||
Cerca 5 FILM a tuo piacimento, se il titolo non esce controlla confrontando i risultati sul sito...:
|
||||
|
||||
*Copia questa sezione per ogni voce che presenta problemi*
|
||||
|
||||
controlla ci siano queste voci se titolo è un FILM:
|
||||
|
||||
- [ ] inserisci il titolo cercato che da problemi
|
||||
- [x] Aggiungi in videoteca
|
||||
- [x] Scarica Film
|
||||
|
||||
*Fine Copia*
|
||||
|
||||
controlla ci siano queste voci se titolo è una SERIE/ANIME:
|
||||
|
||||
*Copia questa sezione per ogni voce che presenta problemi*
|
||||
|
||||
controlla ci siano queste voci se titolo è un FILM:
|
||||
|
||||
- [ ] inserisci il titolo cercato che da problemi
|
||||
- [x] Aggiungi in videoteca
|
||||
- [x] Scarica Serie
|
||||
- [x] Scarica Stagione
|
||||
|
||||
- [ ] inserisci il titolo cercato che da problemi
|
||||
|
||||
*Fine Copia*
|
||||
|
||||
|
||||
Se il canale ha la parte Novità (questa stringa avvisa che NON è presente: "not_active": ["include_in_newest"]).
|
||||
|
||||
**Test.N6: Novità.**
|
||||
Per questo test ti consiglio di inserire come UNICO sito quello che stai testando, come canale incluso in: Novità -> categoria (film, serie o altro )
|
||||
|
||||
- [ ] Descrivere il problema
|
||||
|
||||
Fine TEST!
|
||||
|
||||
Grazie mille da parte di tutto il team KoD!
|
||||
30
.github/workflows/tests.yml
vendored
Executable file
30
.github/workflows/tests.yml
vendored
Executable file
@@ -0,0 +1,30 @@
|
||||
name: Test Suite
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
tests:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v4.3.0
|
||||
with:
|
||||
python-version: 3.9
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
./tests/run.sh
|
||||
|
||||
- name: Commit & Push changes
|
||||
uses: dmnemec/copy_file_to_another_repo_action@main
|
||||
env:
|
||||
API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }}
|
||||
with:
|
||||
source_file: 'reports'
|
||||
destination_repo: 'kodiondemand/kodiondemand.github.io'
|
||||
user_email: 'tests@kod.bot'
|
||||
user_name: 'bot'
|
||||
commit_message: 'Test suite'
|
||||
30
.github/workflows/updateDomains.yml
vendored
Executable file
30
.github/workflows/updateDomains.yml
vendored
Executable file
@@ -0,0 +1,30 @@
|
||||
name: Update channel domains
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '30 17 * * *'
|
||||
|
||||
jobs:
|
||||
update:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
|
||||
- name: Install dependencies
|
||||
run: pip install requests
|
||||
|
||||
- name: Update domains
|
||||
run: python tools/updateDomains.py
|
||||
|
||||
- name: Commit & Push changes
|
||||
uses: actions-js/push@master
|
||||
with:
|
||||
message: "Aggiornamento domini"
|
||||
branch: "master"
|
||||
github_token: ${{ secrets.API_TOKEN_GITHUB }}
|
||||
32
.github/workflows/updateDomainsStable.yml
vendored
Executable file
32
.github/workflows/updateDomainsStable.yml
vendored
Executable file
@@ -0,0 +1,32 @@
|
||||
name: Update channel domains
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '30 17 * * *'
|
||||
|
||||
jobs:
|
||||
update:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: stable
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
|
||||
- name: Install dependencies
|
||||
run: pip install requests
|
||||
|
||||
- name: Update domains
|
||||
run: python tools/updateDomains.py
|
||||
|
||||
- name: Commit & Push changes
|
||||
uses: actions-js/push@master
|
||||
with:
|
||||
message: "Aggiornamento domini"
|
||||
branch: "stable"
|
||||
github_token: ${{ secrets.API_TOKEN_GITHUB }}
|
||||
7
.gitignore
vendored
Normal file → Executable file
7
.gitignore
vendored
Normal file → Executable file
@@ -3,4 +3,11 @@
|
||||
.DS_Store
|
||||
.idea/
|
||||
.directory
|
||||
custom_code.json
|
||||
last_commit.txt
|
||||
__pycache__/
|
||||
.vscode/settings.json
|
||||
bin/
|
||||
lib/abi
|
||||
tests/home/
|
||||
reports/
|
||||
55
CONTRIBUTING.md
Executable file
55
CONTRIBUTING.md
Executable file
@@ -0,0 +1,55 @@
|
||||
Ciao, grazie per aver preso in considerazione di contribuire a questo progetto!<br>
|
||||
Ci sono molti modi per farlo, e per alcuni di essi non è necessario essere uno sviluppatore.
|
||||
|
||||
Puoi ad esempio [segnalare i cambiamenti di struttura](#segnalare-i-cambiamenti-di-struttura) dei canali/server, [scrivere guide o registrare video-esempi](#scrivere-guide-o-registrare-video-esempi) su alcune funzionalità "avanzate", dare consigli su funzionalità nuove o per migliorare quelle già presenti.
|
||||
|
||||
# Segnalare i cambiamenti di struttura
|
||||
KoD, alla fine, non è altro che un browser che estrapola dai siti le info richieste secondo regole ben precise, basate sulla struttura dei siti.<br>
|
||||
I siti web cambiano, spesso, ciò che oggi funziona domani potrebbe non più funzionare, pertanto sono fondamentali le segnalazioni, ma esse per essere realmente utili devono:
|
||||
- contenere il file di log (lo potete generare andando in Aiuto - Segnala un problema e seguendo le istruzioni)
|
||||
- spiegare brevemente qual'è il problema e dove, ad esempio "cineblog da errore quando entro nella sezione Film", oppure "wstream non da nessun errore ma il video di fatto non parte"
|
||||
- essere replicabili, se si tratta di cose che accadono una volta ogni tanto puoi provare a segnalare lo stesso, sperando che nel log ci sia qualche indizio. Se non c'è, nada
|
||||
|
||||
Prima di segnalare un problema assicurati che sia realmemte legato a KoD, sotto alcuni requisiti necessari:
|
||||
- avere l'ultima versione di KoD, per controllare vai qui e confronta il numero con quello presente nella sezione aiuto: https://github.com/kodiondemand/addon/commits/stable
|
||||
- avere una versione di kodi supportata, attualmente si tratta di 17.x e 18.x
|
||||
- verificare che il problema non dipenda dal sito stesso: se esce il messaggio 'Apri nel Browser': apri il tuo Browser e prova se li il film o serie tv funziona, senno apri il menù contestuale (tasto c) e clicca su "apri nel browser"
|
||||
|
||||
Sei pregato di attenerti il più possibile a quanto descritto qua perchè un semplice "non funziona" fa solo perdere tempo.
|
||||
Puoi fare tutte le segnalazioni nella sezione [issues](https://github.com/kodiondemand/addon/issues), cliccando su "new issue" appariranno dei template che ti guideranno nel processo.
|
||||
Assicurati che qualcun'altro non abbia già effettuato la stessa segnalazione, nel caso avessi altro da aggiungere rispondi ad un issue già aperto piuttosto che farne uno nuovo.
|
||||
|
||||
# Scrivere guide o registrare video-esempi
|
||||
Cerca di essere sintetico ma senza tralasciare le informazioni essenziali, una volta fatto mandalo pure sul [gruppo telegram](https://t.me/kodiondemand) taggando gli admin (@admin).<br>
|
||||
Verrà preso in considerazione il prima possibile ed eventualmente inserito nella [wiki](https://github.com/kodiondemand/addon/wiki) o verrà creato un comando richiamabile nel gruppo.
|
||||
|
||||
# Consigli
|
||||
Effettuali sempre nella sezione [issues](https://github.com/kodiondemand/addon/issues), miraccomando descrivi e fai esempi pratici.<br>
|
||||
|
||||
# Per sviluppatori
|
||||
|
||||
Di seguito tutte le info su come prendere confidenza col codice e come contribuire
|
||||
|
||||
## Da dove posso partire?
|
||||
Un buon punto di partenza è [la wiki](https://github.com/kodiondemand/addon/wiki), qui è presente un minimo di documentazione sul funzionamento di KoD.<br>
|
||||
Ti consigliamo vivamente, una volta compreso il funzionamento generale dell'addon (e prima di iniziare a sviluppare), di [forkare e clonare il repository](https://help.github.com/en/github/getting-started-with-github/fork-a-repo).<br>
|
||||
Questo perchè, oltre al fatto di poter iniziare a mandare modifiche sul tuo account github, l'utilizzo di git abilita la [dev mode](https://github.com/kodiondemand/addon/wiki/dev-mode), che ti sarà di aiuto nelle tue attività.
|
||||
|
||||
## che cosa posso fare?
|
||||
Puoi provare a fixare un bug che hai riscontrato, aggiungere un canale/server che ti interessa ecc..
|
||||
Oppure puoi guardare nella sezione [Projects](https://github.com/kodiondemand/addon/projects) cosa è previsto e iniziare a svilupparlo!
|
||||
|
||||
## ho fatto le modifiche che volevo, e ora?
|
||||
Pusha sul tuo fork le modifiche che hai fatto e manda una pull request. Se è la prima volta ecco qualche link che ti aiuterà:
|
||||
- http://makeapullrequest.com/
|
||||
- http://www.firsttimersonly.com/
|
||||
- [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github).
|
||||
|
||||
Quando crei la pull request, ricordati di spiegare brevemente qual'è la modifica e perchè l'hai fatta.
|
||||
Quando avremo tempo revisioneremo le modifiche, potremmo anche segnalarti alcuni problemi, nel caso prenditi pure il tutto il tempo che vuoi per sistemare (non è necessaria un'altra pull, tutti i commit verranno riportati nella prima).<br>
|
||||
Quando sarà tutto a posto accetteremo la pull includendo le modifiche
|
||||
|
||||
## Regole per le collaborazioni:
|
||||
- Se si riutilizza codice proveniente da altri addon è necessario citarne la fonte, per rispetto di chi ci ha lavorato, in caso contrario il pull request verrà respinto.
|
||||
- Ogni modifica o novità inviata dev'essere testata, può capitare che vi sia sfuggito qualche bug (è normale), ma l'invio di materiale senza preventivi controlli non è gradito.
|
||||
- I nuovi canali devono essere funzionanti e completi di tutte le feature, comprese videoteca ed autoplay, non verranno accettati finchè non lo saranno.
|
||||
23
README.md
Normal file → Executable file
23
README.md
Normal file → Executable file
@@ -1,24 +1,13 @@
|
||||
# Kodi On Demand
|
||||
### Un fork italiano di [Alfa](https://github.com/alfa-addon)
|
||||
Ognuno è libero (anzi, invitato!) a collaborare, per farlo è possibile utilizzare i pull request.
|
||||
|
||||
KOD, come Alfa, è sotto licenza GPL v3, pertanto siete liberi di utilizzare parte del codice, a patto di rispettare i termini di suddetta licenza, che si possono riassumere in:
|
||||
Installazione: https://kodiondemand.github.io/#download
|
||||
|
||||
KoD, come Alfa, è sotto licenza GPL v3, pertanto siete liberi di utilizzare parte del codice, a patto di rispettare i termini di suddetta licenza, che si possono riassumere in:
|
||||
|
||||
- Il tuo addon deve essere rilasciando secondo la stessa licenza, ovvero essere open source (il fatto che lo zip sia visibile da chiunque non ha importanza, è necessario avere un repository git come questo)
|
||||
- Aggiungere i crediti a tutto ciò che copiate/modificate, ad esempio aggiungendo un commento nel file in questione o, meglio, facendo un cherry-pick (in modo da preservarnee lo storico)
|
||||
- Aggiungere i crediti a tutto ciò che copiate/modificate, ad esempio aggiungendo un commento nel file in questione o, meglio, facendo un cherry-pick (in modo da preservarne lo storico)
|
||||
|
||||
### Come contribuire?
|
||||
- Fai un Fork del repository.
|
||||
- Effettua tutte le modifiche e fai un push nel tuo repository remoto.
|
||||
- Testa tutte le funzioni principali (videoteca, autoplay, scraper web) o eventuali aggiunte extra.
|
||||
- Apri una pull request.
|
||||
### Come contribuire o fare segnalazioni?
|
||||
Ti piace il progetto e vuoi dare una mano? Leggi [qui](https://github.com/kodiondemand/addon/blob/master/CONTRIBUTING.md)
|
||||
|
||||
Regole per le collaborazioni:
|
||||
- Se si riutilizza codice proveniente da altri addon è necessario citarne la fonte, per rispetto di chi ci ha lavorato, in caso contrario il pull request verrà respinto.
|
||||
- Ogni modifica o novità inviata dev'essere testata, può capitare che vi sia sfuggito qualche bug (è normale), ma l'invio di materiale senza preventivi controlli non è gradito.
|
||||
- I nuovi canali devono essere funzionanti e completi di tutte le feature, comprese videoteca ed autoplay, non verranno accettati finchè non lo saranno.
|
||||
|
||||
Se parte del codice di un tuo addon è stato incluso in questo progetto e ne desideri l'eliminazione, crea una issue portando le prove di essere veramente uno dei dev e lo elimineremo.
|
||||
|
||||
### Qualcosa non funziona?
|
||||
Sentiti libero di segnalarlo al team [qui](https://github.com/kodiondemand/addon/issues)
|
||||
|
||||
0
__init__.py
Normal file → Executable file
0
__init__.py
Normal file → Executable file
@@ -1 +0,0 @@
|
||||
theme: jekyll-theme-midnight
|
||||
36
addon.xml
Normal file → Executable file
36
addon.xml
Normal file → Executable file
@@ -1,26 +1,34 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
||||
<addon id="plugin.video.kod" name="Kodi on Demand" version="0.3.1" provider-name="KOD Team">
|
||||
<addon id="plugin.video.kod" name="Kodi on Demand" version="1.7.7" provider-name="KoD Team">
|
||||
<requires>
|
||||
<import addon="xbmc.python" version="2.1.0"/>
|
||||
<import addon="script.module.libtorrent" optional="true"/>
|
||||
<import addon="metadata.themoviedb.org"/>
|
||||
<import addon="metadata.tvdb.com"/>
|
||||
<import addon="script.module.web-pdb" />
|
||||
<!-- <import addon="script.module.libtorrent" optional="true"/> -->
|
||||
<import addon="metadata.themoviedb.org" optional="true"/>
|
||||
<import addon="metadata.tvshows.themoviedb.org" optional="true"/>
|
||||
<!-- <import addon="metadata.tvdb.com"/> -->
|
||||
|
||||
</requires>
|
||||
<extension point="xbmc.python.pluginsource" library="default.py">
|
||||
<provides>video</provides>
|
||||
</extension>
|
||||
<extension point="kodi.context.item">
|
||||
<menu id="kodi.core.main">
|
||||
<item library="contextmenu.py">
|
||||
<label>90001</label>
|
||||
<visible>!String.StartsWith(ListItem.FileNameAndPath, plugin://plugin.video.kod/) + [ String.IsEqual(ListItem.dbtype, tvshow) | String.IsEqual(ListItem.dbtype, movie) | String.IsEqual(ListItem.dbtype, season) | String.IsEqual(ListItem.dbtype, episode) ]</visible>
|
||||
</item>
|
||||
</menu>
|
||||
</extension>
|
||||
<extension point="xbmc.addon.metadata">
|
||||
<summary lang="en">Kodi on Demand is a Kodi add-on to search and watch contents on the web.</summary>
|
||||
<summary lang="it">Kodi on Demand è un addon di Kodi per cercare e guardare contenuti sul web.</summary>
|
||||
<assets>
|
||||
<icon>logo.png</icon>
|
||||
<fanart>fanart.jpg</fanart>
|
||||
<screenshot>resources/media/themes/ss/1.png</screenshot>
|
||||
<screenshot>resources/media/themes/ss/2.png</screenshot>
|
||||
<screenshot>resources/media/themes/ss/3.png</screenshot>
|
||||
<icon>resources/media/logo.png</icon>
|
||||
<fanart>resources/media/fanart.jpg</fanart>
|
||||
<screenshot>resources/media/screenshot-1.png</screenshot>
|
||||
<screenshot>resources/media/screenshot-2.png</screenshot>
|
||||
<screenshot>resources/media/screenshot-3.png</screenshot>
|
||||
</assets>
|
||||
<news>Benvenuto su KOD!</news>
|
||||
<news>- fix di routine ai canali/server
|
||||
</news>
|
||||
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
|
||||
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
|
||||
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>
|
||||
@@ -30,6 +38,6 @@
|
||||
<forum>https://t.me/kodiondemand</forum>
|
||||
<source>https://github.com/kodiondemand/addon</source>
|
||||
</extension>
|
||||
<extension point="xbmc.service" library="videolibrary_service.py" start="login|startup">
|
||||
<extension point="xbmc.service" library="service.py" start="login|startup">
|
||||
</extension>
|
||||
</addon>
|
||||
|
||||
105
channels.json
Normal file → Executable file
105
channels.json
Normal file → Executable file
@@ -1,63 +1,44 @@
|
||||
{
|
||||
"altadefinizione01_club": "https://www.altadefinizione01.cc",
|
||||
"altadefinizione01_link": "http://altadefinizione01.link",
|
||||
"altadefinizione01": "https://altadefinizione01.to",
|
||||
"altadefinizioneclick": "https://altadefinizione.cloud",
|
||||
"altadefinizionehd": "https://altadefinizione.doctor",
|
||||
"animeforge": "https://ww1.animeforce.org",
|
||||
"animeleggendari": "https://animepertutti.com",
|
||||
"animestream": "https://www.animeworld.it",
|
||||
"animespace": "https://animespace.tv",
|
||||
"animesubita": "http://www.animesubita.org",
|
||||
"animetubeita": "http://www.animetubeita.com",
|
||||
"animevision": "https://www.animevision.it",
|
||||
"animeworld": "https://www.animeworld.it",
|
||||
"asiansubita": "http://asiansubita.altervista.org",
|
||||
"casacinema": "https://www.casacinema.site",
|
||||
"casacinemainfo": "https://www.casacinema.info",
|
||||
"cb01anime": "http://www.cineblog01.ink",
|
||||
"cinemalibero": "https://cinemalibero.icu",
|
||||
"cinemastreaming": "https://cinemastreaming.icu",
|
||||
"documentaristreamingda": "https://documentari-streaming-da.com",
|
||||
"dreamsub": "https://www.dreamsub.stream",
|
||||
"eurostreaming": "https://eurostreaming.gratis",
|
||||
"eurostreaming_video": "https://www.eurostreaming.best",
|
||||
"fastsubita": "http://fastsubita.com",
|
||||
"ffilms":"https://ffilms.org",
|
||||
"filmigratis": "https://filmigratis.net",
|
||||
"filmgratis": "https://www.filmaltadefinizione.net",
|
||||
"filmontv": "https://www.comingsoon.it",
|
||||
"filmpertutti": "https://www.filmpertutti.tube",
|
||||
"filmsenzalimiti": "https://filmsenzalimiti.best",
|
||||
"filmsenzalimiticc": "https://www.filmsenzalimiti.host",
|
||||
"filmsenzalimiti_blue": "https://filmsenzalimiti.best",
|
||||
"filmsenzalimiti_info": "https://www.filmsenzalimiti.host",
|
||||
"filmstreaming01": "https://filmstreaming01.com",
|
||||
"filmstreamingita": "http://filmstreamingita.live",
|
||||
"guarda_serie": "https://guardaserie.site",
|
||||
"guardafilm": "http://www.guardafilm.top",
|
||||
"guardarefilm": "https://www.guardarefilm.video",
|
||||
"guardaseriecc": "https://guardaserie.site",
|
||||
"guardaserieclick": "https://www.guardaserie.media",
|
||||
"guardaserie_stream": "https://guardaserie.co",
|
||||
"guardaserieonline": "http://www.guardaserie.media",
|
||||
"guardogratis": "http://guardogratis.io",
|
||||
"ilgeniodellostreaming": "https://ilgeniodellostreaming.pw",
|
||||
"italiafilm": "https://www.italia-film.pw",
|
||||
"italiafilmhd": "https://italiafilm.info",
|
||||
"italiaserie": "https://italiaserie.org",
|
||||
"itastreaming": "https://itastreaming.film",
|
||||
"majintoon": "https://toonitalia.org",
|
||||
"mondolunatico": "http://mondolunatico.org",
|
||||
"mondolunatico2": "http://mondolunatico.org/stream/",
|
||||
"mondoserietv": "https://mondoserietv.com",
|
||||
"piratestreaming": "https://www.piratestreaming.watch",
|
||||
"seriehd": "https://www.seriehd.info",
|
||||
"serietvonline": "https://serietvonline.xyz",
|
||||
"serietvsubita": "http://serietvsubita.xyz",
|
||||
"serietvu": "https://www.serietvu.club",
|
||||
"streamingaltadefinizione": "https://www.streamingaltadefinizione.space",
|
||||
"streamking": "http://streamking.cc",
|
||||
"tantifilm": "https://www.tantifilm.plus",
|
||||
"toonitalia": "https://toonitalia.org"
|
||||
}
|
||||
"direct": {
|
||||
"altadefinizione01": "https://altadefinizione01.pet",
|
||||
"animealtadefinizione": "http://ww38.animealtadefinizione.it",
|
||||
"animeforce": "https://www.animeforce.it",
|
||||
"animesaturn": "https://www.animesaturn.cx",
|
||||
"animeunity": "https://www.animeunity.tv",
|
||||
"animeworld": "https://www.animeworld.so",
|
||||
"aniplay": "https://aniplay.co",
|
||||
"casacinema": "https://casacinema.media",
|
||||
"cb01anime": "https://cb01new.one",
|
||||
"cinemalibero": "https://cinemalibero.cafe",
|
||||
"cinetecadibologna": "http://cinestore.cinetecadibologna.it",
|
||||
"dinostreaming": "https://dinostreaming.it",
|
||||
"discoveryplus": "https://www.discoveryplus.com",
|
||||
"dreamsub": "https://www.animeworld.so",
|
||||
"eurostreaming": "https://eurostreaming.lifestyle",
|
||||
"eurostreaming_actor": "https://eurostreaming.my",
|
||||
"filmstreaming": "https://film-streaming-ita.cam",
|
||||
"guardaseriecam": "https://guardaserie.kitchen",
|
||||
"hd4me": "https://hd4me.net",
|
||||
"ilcorsaronero": "https://ilcorsaronero.link",
|
||||
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.foo",
|
||||
"italiafilm": "https://italia-film.biz",
|
||||
"mediasetplay": "https://mediasetinfinity.mediaset.it",
|
||||
"mondoserietv": "http://ww25.mondoserietv.club/?subid1=20230304-0434-261c-9cb0-a0044930e0a9",
|
||||
"paramount": "https://www.mtv.it",
|
||||
"piratestreaming": "https://piratestreaming.design",
|
||||
"plutotv": "https://pluto.tv",
|
||||
"raiplay": "https://www.raiplay.it",
|
||||
"serietvu": "http://ww1.serietvu.live/?sub1=47fb879a-5325-11ee-94a7-cc35006f53d1",
|
||||
"streamingcommunity": "https://streamingcommunity.ooo",
|
||||
"streamingita": "https://streamingita.click",
|
||||
"tantifilm": "https://tantifilm.name",
|
||||
"toonitalia": "https://toonitalia.xyz"
|
||||
},
|
||||
"findhost": {
|
||||
"altadefinizione": "https://altadefinizione.nuovo.live",
|
||||
"altadefinizionecommunity": "https://altaregistrazione.net",
|
||||
"animealtadefinizione": "https://www.animealtadefinizione.it",
|
||||
"cineblog01": "https://cb01.uno",
|
||||
"filmpertutti": "https://filmpertuttiii.nuovo.live"
|
||||
}
|
||||
}
|
||||
107
channels/0example.json.txt
Executable file
107
channels/0example.json.txt
Executable file
@@ -0,0 +1,107 @@
|
||||
Rev:0.2
|
||||
Update: 03-10-2019
|
||||
#####################
|
||||
|
||||
Promemoria da cancellare pena la non visibilità del canale in KOD!!
|
||||
|
||||
#####################
|
||||
|
||||
|
||||
le voci in settings sono state inserite per l'unico scopo
|
||||
di velocizzare la scrittura del file
|
||||
Vanno lasciate solo quelle voci il cui funzionamento sul
|
||||
canale non vanno attivate.
|
||||
"not_active": ["include_in_newest"], VA INSERITO nei canali che NON hanno nessuna voce newest.
|
||||
Ovviamente va mantenuto tutto il codice di quell'id tra le {}
|
||||
se vanno cancellati tutti deve rimanere la voce:
|
||||
"settings": []
|
||||
##################### Cancellare fino a qui!
|
||||
{
|
||||
"id": "nome del file .json",
|
||||
"name": "Nome del canale visualizzato in KOD",
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": false,
|
||||
"thumbnail": "",
|
||||
"banner": "",
|
||||
"categories": ["movie", "tvshow", "anime", "vos", "documentary"],
|
||||
"not_active": ["include_in_newest"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "@70728",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "@70727",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "@70727",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "@70727",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 2,
|
||||
"enabled": false,
|
||||
"visible": "eq(-1,false)",
|
||||
"lvalues": [ "3", "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "@30019",
|
||||
"default": 0,
|
||||
"enabled": false,
|
||||
"visible": false,
|
||||
"lvalues": ["Non Filtrare"]
|
||||
}
|
||||
],
|
||||
|
||||
"renumber": [
|
||||
{
|
||||
"id": "autorenumber",
|
||||
"type": "bool",
|
||||
"label": "@70712",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "autorenumber_mode",
|
||||
"type": "bool",
|
||||
"label": "@70688",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": "eq(-1,false)"
|
||||
}
|
||||
]
|
||||
}
|
||||
269
channels/0example.py.txt
Executable file
269
channels/0example.py.txt
Executable file
@@ -0,0 +1,269 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per 'idcanale nel json'
|
||||
# By: pincopallo!
|
||||
# Eventuali crediti se vuoi aggiungerli
|
||||
# ------------------------------------------------------------
|
||||
# Rev: 0.2
|
||||
# Update 12-10-2019
|
||||
# fix:
|
||||
# 1. aggiunto pagination e sistemate alcune voci
|
||||
# 2. modificato problemi in eccezioni
|
||||
# 3. aggiunta la def check
|
||||
# 4. modifica alla legenda e altre aggiunte
|
||||
|
||||
# Questo vuole solo essere uno scheletro per velocizzare la scrittura di un canale.
|
||||
# La maggior parte dei canali può essere scritta con il decoratore.
|
||||
# I commenti sono più un promemoria... che una vera e propria spiegazione!
|
||||
# Niente di più.
|
||||
# Ulteriori informazioni sono reperibili nel wiki:
|
||||
# https://github.com/kodiondemand/addon/wiki/decoratori
|
||||
|
||||
"""
|
||||
Questi sono commenti per i beta-tester.
|
||||
|
||||
Su questo canale, nella categoria 'Ricerca Globale'
|
||||
non saranno presenti le voci 'Aggiungi alla Videoteca'
|
||||
e 'Scarica Film'/'Scarica Serie', dunque,
|
||||
la loro assenza, nel Test, NON dovrà essere segnalata come ERRORE.
|
||||
|
||||
Novità. Indicare in quale/i sezione/i è presente il canale:
|
||||
- Nessuna, film, serie, anime...
|
||||
|
||||
Avvisi:
|
||||
- Eventuali avvisi per i tester
|
||||
|
||||
Ulteriori info:
|
||||
|
||||
"""
|
||||
# CANCELLARE Ciò CHE NON SERVE per il canale, lascia il codice commentato ove occorre,
|
||||
# ma fare PULIZIA quando si è finito di testarlo
|
||||
|
||||
# Qui gli import
|
||||
#import re
|
||||
|
||||
# per l'uso dei decoratori, per i log, e funzioni per siti particolari
|
||||
from core import support
|
||||
|
||||
# in caso di necessità
|
||||
#from core import scrapertools, httptools, servertools, tmdb
|
||||
from core.item import Item # per newest
|
||||
#from lib import unshortenit
|
||||
|
||||
##### fine import
|
||||
|
||||
# se il sito ha un link per ottenere l'url corretto in caso di oscuramenti
|
||||
# la funzione deve ritornare l'indirizzo corretto, verrà chiamata solo se necessario (link primario irraggiungibile)
|
||||
def findhost(url):
|
||||
permUrl = httptools.downloadpage(url, follow_redirects=False).headers
|
||||
if 'google' in permUrl['location']:
|
||||
host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
|
||||
else:
|
||||
host = permUrl['location']
|
||||
return host
|
||||
|
||||
# se si usa findhost metti in channels.json l'url del sito che contiene sempre l'ultimo dominio
|
||||
host = config.get_channel_url(findhost)
|
||||
# se non si usa metti direttamente l'url finale in channels.json
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
### fine variabili
|
||||
|
||||
#### Inizio delle def principali ###
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
support.info(item)
|
||||
|
||||
# Ordine delle voci
|
||||
# Voce FILM, puoi solo impostare l'url
|
||||
film = ['', # url per la voce FILM, se possibile la pagina principale con le ultime novità
|
||||
#Voce Menu,['url','action','args',contentType]
|
||||
('Al Cinema', ['', 'peliculas', '']),
|
||||
('Generi', ['', 'genres', 'genres']),
|
||||
('Per Lettera', ['', 'genres', 'letters']),
|
||||
('Anni', ['', 'genres', 'years']),
|
||||
('Qualità', ['', 'genres', 'quality']),
|
||||
('Mi sento fortunato', ['', 'genres', 'lucky']),
|
||||
('Popolari', ['', 'peliculas', '']),
|
||||
('Sub-ITA', ['', 'peliculas', ''])
|
||||
]
|
||||
|
||||
# Voce SERIE, puoi solo impostare l'url
|
||||
tvshow = ['', # url per la voce Serie, se possibile la pagina con titoli di serie
|
||||
#Voce Menu,['url','action','args',contentType]
|
||||
('Novità', ['', '', '']),
|
||||
('Per Lettera', ['', 'genres', 'letters']),
|
||||
('Per Genere', ['', 'genres', 'genres']),
|
||||
('Per anno', ['', 'genres', 'years'])
|
||||
]
|
||||
# Voce ANIME, puoi solo impostare l'url
|
||||
anime = ['', # url per la voce Anime, se possibile la pagina con titoli di anime
|
||||
#Voce Menu,['url','action','args',contentType]
|
||||
('Novità', ['', '', '']),
|
||||
('In Corso',['', '', '', '']),
|
||||
('Ultimi Episodi',['', '', '', '']),
|
||||
('Ultime Serie',['', '', '', ''])
|
||||
]
|
||||
|
||||
"""
|
||||
Eventuali Menu per voci non contemplate!
|
||||
"""
|
||||
|
||||
# se questa voce non è presente il menu genera una voce
|
||||
# search per ogni voce del menu. Es. Cerca Film...
|
||||
search = '' # se alla funzione search non serve altro
|
||||
|
||||
# VOCE CHE APPARIRA' come prima voce nel menu di KOD!
|
||||
# [Voce Menu,['url','action','args',contentType]
|
||||
top = ([ '' ['', '', '', ''])
|
||||
|
||||
# Se vuoi creare un menu personalizzato o perchè gli altri non
|
||||
# ti soddisfano
|
||||
# [Voce Menu,['url','action','args',contentType]
|
||||
nome = [( '' ['', '', '', ''])
|
||||
return locals()
|
||||
|
||||
|
||||
# Legenda known_keys per i groups nei patron
|
||||
# known_keys = ['url', 'title', 'title2', 'season', 'episode', 'thumb', 'quality',
|
||||
# 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']
|
||||
# url = link relativo o assoluto alla pagina titolo film/serie
|
||||
# title = titolo Film/Serie/Anime/Altro
|
||||
# title2 = titolo dell'episodio Serie/Anime/Altro
|
||||
# season = stagione in formato numerico
|
||||
# episode = numero episodio, in formato numerico.
|
||||
# thumb = linkrealtivo o assoluto alla locandina Film/Serie/Anime/Altro
|
||||
# quality = qualità indicata del video
|
||||
# year = anno in formato numerico (4 cifre)
|
||||
# duration = durata del Film/Serie/Anime/Altro
|
||||
# genere = genere del Film/Serie/Anime/Altro. Es: avventura, commedia
|
||||
# rating = punteggio/voto in formato numerico
|
||||
# type = tipo del video. Es. movie per film o tvshow per le serie. Di solito sono discrimanti usati dal sito
|
||||
# lang = lingua del video. Es: ITA, Sub-ITA, Sub, SUB ITA.
|
||||
# AVVERTENZE: Se il titolo è trovato nella ricerca TMDB/TVDB/Altro allora le locandine e altre info non saranno quelle recuperate nel sito.!!!!
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
support.info(item)
|
||||
#support.dbg() # decommentare per attivare web_pdb
|
||||
|
||||
action = ''
|
||||
blacklist = ['']
|
||||
patron = r''
|
||||
patronBlock = r''
|
||||
patronNext = ''
|
||||
pagination = ''
|
||||
|
||||
#debug = True # True per testare le regex sul sito
|
||||
return locals()
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
support.info(item)
|
||||
#support.dbg()
|
||||
|
||||
action = ''
|
||||
blacklist = ['']
|
||||
patron = r''
|
||||
patronBlock = r''
|
||||
patronNext = ''
|
||||
pagination = ''
|
||||
|
||||
#debug = True
|
||||
return locals()
|
||||
|
||||
# Questa def è utilizzata per generare i menu del canale
|
||||
# per genere, per anno, per lettera, per qualità ecc ecc
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
support.info(item)
|
||||
#support.dbg()
|
||||
|
||||
action = ''
|
||||
blacklist = ['']
|
||||
patron = r''
|
||||
patronBlock = r''
|
||||
patronNext = ''
|
||||
pagination = ''
|
||||
|
||||
#debug = True
|
||||
return locals()
|
||||
|
||||
############## Fine ordine obbligato
|
||||
## Def ulteriori
|
||||
|
||||
# per quei casi dove il sito non differenzia film e/o serie e/o anime
|
||||
# e la ricerca porta i titoli mischiati senza poterli distinguere tra loro
|
||||
# andranno modificate anche le def peliculas e episodios ove occorre
|
||||
def check(item):
|
||||
support.info('select --->', item)
|
||||
#support.dbg()
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
# pulizia di data, in caso commentare le prossime 2 righe
|
||||
data = re.sub('\n|\t', ' ', data)
|
||||
data = re.sub(r'>\s+<', '> <', data)
|
||||
block = scrapertools.find_single_match(data, r'')
|
||||
if re.findall('', data, re.IGNORECASE):
|
||||
support.info('select = ### è una serie ###')
|
||||
return episodios(Item(channel=item.channel,
|
||||
title=item.title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=item.url,
|
||||
args='serie',
|
||||
contentType='tvshow',
|
||||
#data1 = data decommentando portiamo data nella def senza doverla riscaricare
|
||||
))
|
||||
|
||||
############## Fondo Pagina
|
||||
# da adattare al canale
|
||||
def search(item, text):
    """Channel search entry point (template; adapt the URL to the site).

    Builds the site search URL from *text* and delegates to peliculas().
    Any exception is swallowed and logged so a broken channel cannot abort
    the global search.

    NOTE: item.contentType must be set for global search when the channel
    serves more than movies; a movie-only channel can omit it.
    """
    support.info('search', item)
    text = text.replace(' ', '+')
    item.url = host + '/index.php?do=search&story=%s&subaction=search' % (text)
    try:
        return peliculas(item)
    except Exception:
        import sys
        for line in sys.exc_info():
            # FIX: was a bare `info(...)` — a NameError; the module logs via
            # support.info everywhere else.
            support.info('search log:', line)
        return []
|
||||
|
||||
|
||||
# da adattare al canale
|
||||
# inserire newest solo se il sito ha la pagina con le ultime novità/aggiunte
|
||||
# altrimenti NON inserirlo
|
||||
def newest(categoria):
    """Return the latest additions for *categoria* (template; include this
    def only if the site actually has a "new arrivals" page).

    Returns a list of Items; on any error it logs and returns [] so the
    global "newest" aggregation keeps working.
    """
    support.info('newest ->', categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
            item.action = 'peliculas'
            itemlist = peliculas(item)

            # Drop the trailing "next page" pseudo-item, if present.
            if itemlist[-1].action == 'peliculas':
                itemlist.pop()
    except Exception:
        import sys
        for line in sys.exc_info():
            # FIX: was `{0}.format(line)` — a *set literal*; set has no
            # .format, so the handler itself raised AttributeError.
            support.info('newest log: ', '{0}'.format(line))
        return []

    return itemlist
|
||||
|
||||
# da adattare...
|
||||
# consultare il wiki sia per support.server che ha vari parametri,
|
||||
# sia per i siti con hdpass
|
||||
#support.server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True)
|
||||
def findvideos(item):
    """Resolve playable links via support.server (template).

    See the wiki for the extra support.server parameters (data, itemlist,
    AutoPlay, CheckLinks, ...) and for sites behind hdpass.
    """
    support.info('findvideos ->', item)
    return support.server(item, headers=headers)
|
||||
20
channels/1337x.json
Executable file
20
channels/1337x.json
Executable file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"id": "1337x",
|
||||
"name": "1337x",
|
||||
"language": ["ita", "sub-ita", "eng"],
|
||||
"active": true,
|
||||
"thumbnail": "1337x.png",
|
||||
"banner": "1337x.png",
|
||||
"categories": ["movie", "tvshow", "torrent"],
|
||||
"not_active": ["include_in_newest"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "itaSearch",
|
||||
"type": "bool",
|
||||
"label": "Cerca contenuti in italiano",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
193
channels/1337x.py
Executable file
193
channels/1337x.py
Executable file
@@ -0,0 +1,193 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per 1337x
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import inspect
|
||||
from core import support
|
||||
from platformcode import logger, config
|
||||
|
||||
# host = support.config.get_channel_url()
|
||||
host = 'https://www.1337x.to'
|
||||
|
||||
@support.menu
def mainlist(item):
    """Channel root menu.

    `menu` and `search` are the @support.menu contract: the decorator pulls
    them from the returned locals() dict, so the names must not change.
    Each menu entry is (label, [url, action, args, contentType]).
    """
    menu = [
        ('Film ITA {bullet bold}', ['/movie-lib-sort/all/it/popularity/desc/all/1/', 'peliculas', '', 'movie']),
        ('Film {submenu}', ['/movie-library/1/', 'peliculas', 'filter', 'movie']),
        ('Serie TV {bullet bold}', ['/series-library/', 'az', '', 'tvshow']),
    ]
    search = ''
    return locals()
|
||||
|
||||
|
||||
def moviefilter(item):
    """Show the movie filter dialog and return the filtered library URL.

    Scrapes the <select> boxes of the library page (genre, year, language,
    sort key, sort direction), presents them as list controls, and lets the
    `filtered` callback map the chosen indexes back to URL slugs.
    Returns None if the user cancels the dialog.
    """
    if logger.testMode:
        # Fixed URL so automated tests do not depend on dialog interaction.
        return host + '/movie-lib-sort/all/all/score/desc/all/1/'
    from platformcode import platformtools

    item.args = ''
    controls = []
    data = support.match(item).data

    patronBlock = r'<select name="{}"[^>]+>(.+?)</select>'
    patron = r'value="([^"]+)">([^<]+)'

    genres = support.match(data, patronBlock=patronBlock.format('genre'), patron=patron).matches
    years = support.match(data, patronBlock=patronBlock.format('year'), patron=patron).matches
    langs = support.match(data, patronBlock=patronBlock.format('lang'), patron=patron).matches
    sorts = support.match(data, patronBlock=patronBlock.format('sortby'), patron=patron).matches
    orders = support.match(data, patronBlock=patronBlock.format('sort'), patron=patron).matches

    # Keep the raw URL slugs on the item: `filtered` maps each selected
    # index back to its slug.
    item.genreValues = [x[0] for x in genres]
    item.yearValues = [x[0] for x in years]
    item.langValues = [x[0] for x in langs]
    item.sortValues = [x[0] for x in sorts]
    item.orderValues = [x[0] for x in orders]

    # Human-readable labels for the dialog.
    genres = [g[1] for g in genres]
    years = [g[1] for g in years]
    langs = [g[1] for g in langs]
    sorts = [g[1] for g in sorts]
    orders = [g[1] for g in orders]

    controls.append({'id': 'lang', 'label': 'Lingua', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': langs, 'default': 0})
    controls.append({'id': 'genre', 'label': 'Genere', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': genres, 'default': 0})
    controls.append({'id': 'year', 'label': 'Anno', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': years, 'default': 0})
    # FIX: the two controls below were labelled 'Anno' (copy-paste); give
    # them their own labels so the dialog is usable.
    controls.append({'id': 'sort', 'label': 'Ordina per', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': sorts, 'default': 0})
    controls.append({'id': 'order', 'label': 'Ordine', 'type': 'list', 'enabled': True, 'visible': True, 'lvalues': orders, 'default': 0})
    return platformtools.show_channel_settings(list_controls=controls, item=item, caption='Filtro', callback='filtered')
|
||||
|
||||
|
||||
|
||||
def filtered(item, values):
    """Dialog callback of moviefilter.

    *values* maps each control id to the selected index; the corresponding
    URL slug lists were stashed on the item by moviefilter. Returns the
    fully-built filtered library URL (page 1).
    """
    def pick(attr, key):
        # Map the chosen dialog index back to its URL slug.
        return getattr(item, attr)[values[key]]

    genre = pick('genreValues', 'genre')
    lang = pick('langValues', 'lang')
    sortby = pick('sortValues', 'sort')
    order = pick('orderValues', 'order')
    year = pick('yearValues', 'year')

    return '{}/movie-lib-sort/{}/{}/{}/{}/{}/1/'.format(host, genre, lang, sortby, order, year)
|
||||
|
||||
|
||||
def az(item):
    """Alphabetical series index: a '1-9' bucket plus one entry per letter."""
    import string
    thumb = support.thumb('az')
    entries = [item.clone(title='1-9', url=item.url + 'num/1/', action='peliculas', thumbnail=thumb)]
    entries.extend(
        item.clone(title=ch.upper(), url=item.url + ch + '/1/', action='peliculas', thumbnail=thumb)
        for ch in string.ascii_lowercase
    )
    return entries
|
||||
|
||||
|
||||
def search(item, text):
    """Site search: optionally append ' ita' (user setting), then delegate
    to peliculas(). Errors are logged and swallowed so a broken channel
    cannot abort the global search.
    """
    support.info('search', text)
    item.args = 'search'
    if config.get_setting('itaSearch', channel=item.channel, default=False):
        text += ' ita'
    item.url = '{}/search/{}/1/'.format(host, text.replace(' ', '+'))
    try:
        return peliculas(item)
    except:
        # Keep the global search alive if this channel breaks.
        import sys
        for line in sys.exc_info():
            support.logger.error("search except: ", line)
        return []
|
||||
|
||||
@support.scrape
def peliculas(item):
    """Movie / series / search-result listing.

    Conservative restyle: `data`, `sceneTitle`, `action`, `patron`,
    `patronNext` and `itemlistHook` are read out of locals() by
    @support.scrape, so those names are kept verbatim.
    """
    if item.args == 'filter':
        item.url = moviefilter(item)
    # moviefilter returns None when the dialog is cancelled — fall back to
    # an empty page rather than fetching.
    data = support.match(item).data if item.url else ' '
    # debug = True

    if item.args == 'search':
        sceneTitle = 'undefined'
        patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<(?:[^>]+>){3,7}(?P<seed>[^<]+)<(?:[^>]+>){6}(?P<size>[^<]+)<span'
        patronNext = r'"([^"]+)">>>'
    elif item.contentType == 'movie':
        patron = r'<img[^>]+data-original="(?P<thumb>[^"]+)(?:[^>]+>){15}(?P<title>[^<]+).*?<p>(?P<plot>[^<]+).*?<a href="(?P<url>[^"]+)'
        patronNext = r'"([^"]+)">>>'
    else:
        action = 'seasons'
        patron = r'<img src="(?P<thumb>[^"]+)(?:[^>]+>){4}\s*<a href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)'

    # Search/series pages have no standard "next" anchor; build pagination
    # by hand from the current page number when a "Last" link exists.
    if (item.args == 'search' or item.contentType != 'movie') and not support.stackCheck(['get_channel_results']):
        patronNext = None

        def itemlistHook(itemlist):
            lastUrl = support.match(data, patron=r'href="([^"]+)">Last').match
            if lastUrl:
                currentPage = support.match(item.url, string=True, patron=r'/(\d+)/').match
                nextPage = int(currentPage) + 1
                support.nextPage(itemlist, item, next_page=item.url.replace('/{}'.format(currentPage), '/{}'.format(nextPage)), function_or_level='peliculas')
            return itemlist

    return locals()
|
||||
|
||||
|
||||
@support.scrape
def seasons(item):
    """Season list of a series; `action`/`patron` are read from locals()
    by @support.scrape, so the names are fixed."""
    item.contentType = 'season'
    action = 'episodios'
    patron = r'<li>\s*<a href="(?P<url>[^"]+)[^>]+>\s*<img alt="[^"]*"\ssrc="(?P<thumb>[^"]+)(?:([^>]+)>){2}\s*(?P<title>\w+ (?P<season>\d+))'
    return locals()
|
||||
|
||||
@support.scrape
def episodios(item):
    """Episode list of a season; the site lists newest-first, so the hook
    reverses into airing order. Names are the @support.scrape contract."""
    patron = r'<img src="(?P<thumb>[^"]+)(?:[^>]+>){13}\s*(?P<season>\d+)x(?P<episode>\d+)\s*<span class="seperator">(?:[^>]+>){2}\s*<a href="(?P<url>[^"]+)">(?P<title>[^<]+)'

    def itemlistHook(itemlist):
        itemlist.reverse()
        return itemlist

    return locals()
|
||||
|
||||
|
||||
def findvideos(item):
    """Build torrent entries for a title and hand them to support.server.

    Search results already point at a single torrent page; otherwise the
    page lists several releases, which are parsed with guessit to extract a
    clean title and language tag, then sorted by seeds and size.
    """
    itemlist = []
    item.disableAutoplay = True

    if item.args == 'search':
        itemlist.append(item.clone(server='torrent', action='play'))
    else:
        from lib.guessit import guessit

        rows = support.match(item.url, patron=r'<a href="([^"]+)">([^<]+)<(?:[^>]+>){3}([^<]+)<(?:[^>]+>){6}([^<]+)<span').matches

        for url, title, seed, size in rows:
            parsed = guessit(title)
            title = support.scrapertools.unescape(parsed.get('title', ''))

            lang = ''
            if parsed.get('language'):
                langs = parsed.get('language')
                # A list means several audio languages; otherwise guessit
                # hands back a Language object whose alpha3 code we show.
                lang = 'MULTI' if isinstance(langs, list) else vars(langs).get('alpha3').upper()
                if not (lang.startswith('MUL') or lang.startswith('ITA')):
                    subs = parsed.get('subtitle_language')
                    lang = 'Multi-Sub' if isinstance(subs, list) else vars(subs).get('alpha3').upper()
            if lang:
                title = '{} [{}]'.format(title, lang)

            # Normalise the size to MB so sorting compares like with like.
            sizematch = support.match(size, patron='(\d+(?:\.\d+)?)\s* (\w+)').match
            sizenumber = float(sizematch[0])
            if sizematch[1].lower() == 'gb':
                sizenumber *= 1024

            itemlist.append(item.clone(title='{} [{} SEEDS] [{}]'.format(title, seed, size), seed=int(seed), size=sizenumber, url=host + url, server='torrent', action='play'))

        # Best-seeded (then largest) first.
        itemlist.sort(key=lambda it: (it.seed, it.size), reverse=True)

    return support.server(item, itemlist=itemlist, Videolibrary='movie' in item.args, Sorted=False)
|
||||
|
||||
|
||||
def play(item):
    """Extract the magnet link from the torrent page and resolve it."""
    from core import servertools
    magnet = support.match(item.url, patron=r'href="(magnet[^"]+)').match
    return servertools.find_video_items(item, data=magnet)
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"id": "LIKUOO",
|
||||
"name": "LIKUOO",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://likuoo.video/files_static/images/logo.jpg",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'http://www.likuoo.video'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Pornstar" , action="categorias", url=host + "/pornstars/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/all-channels/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/?s=%s" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = "https:" + scrapedthumbnail
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="item">.*?'
|
||||
patron += '<a href="([^"]+)" title="(.*?)">.*?'
|
||||
patron += 'src="(.*?)".*?'
|
||||
patron += '<div class="runtime">(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
scrapedtime = scrapedtime.replace("m", ":").replace("s", " ")
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " +scrapedtitle
|
||||
contentTitle = title
|
||||
thumbnail = "https:" + scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
|
||||
next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve the video items embedded in the page and tag them with the
    current item's metadata.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        # FIX: was `videochannel=item.channel`, a dead local; every sibling
        # line sets an attribute on videoitem — clearly a typo for .channel.
        videoitem.channel = item.channel
    return itemlist
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"id": "TXXX",
|
||||
"name": "TXXX",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.txxx.com/images/desktop-logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
|
||||
151
channels/TXXX.py
151
channels/TXXX.py
@@ -1,151 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'http://www.txxx.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url=host + "/latest-updates/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/most-popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/most-popular/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/s=%s" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="channel-thumb">.*?'
|
||||
patron += '<a href="([^"]+)" title="([^"]+)".*?'
|
||||
patron += '<img src="([^"]+)".*?'
|
||||
patron += '<span>(.*?)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches:
|
||||
scrapedplot = ""
|
||||
scrapedurl = host + scrapedurl
|
||||
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=title , url=scrapedurl ,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" ,
|
||||
text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<a class="categories-list__link" href="([^"]+)">.*?'
|
||||
patron += '<span class="categories-list__name cat-icon" data-title="([^"]+)">.*?'
|
||||
patron += '<span class="categories-list__badge">(.*?)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,num in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
scrapedthumbnail = ""
|
||||
scrapedplot = ""
|
||||
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=title , url=url ,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
|
||||
patron += '</div>(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
|
||||
contentTitle = scrapedtitle
|
||||
scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="thumb__hd">(.*?)</span>')
|
||||
duration = scrapertools.find_single_match(scrapedtime, '<span class="thumb__duration">(.*?)</span>')
|
||||
if scrapedhd != '':
|
||||
title = "[COLOR yellow]" +duration+ "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
|
||||
else:
|
||||
title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
|
||||
plot=plot, contentTitle=title) )
|
||||
next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next.*?" href="([^"]+)" title="Next Page"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Reassemble TXXX's protected video URL.

    The page splits the obfuscated URL across two JS assignments; the
    joined blob is '||'-separated: [0] encoded URL, [1] replacement path
    segment, [2]/[3] the lip/lt query values.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')

    video_url = decode_url(partes[0])
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    separator = '&' if '?' in video_url else '?'
    video_url += separator + 'lip=' + partes[2] + '<=' + partes[3]

    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist
|
||||
|
||||
|
||||
def decode_url(txt):
    """Decode TXXX's obfuscated video URL.

    A base64 variant over a custom 65-character alphabet where index 64
    ('~') acts as padding, followed by a final percent-decode.
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    out = ''
    n = 0
    # NOTE: АВСЕМ below are Cyrillic look-alikes (2 bytes each in UTF-8);
    # they are normalised to their 1-byte ASCII twins before decoding.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')

    while n < len(txt):
        # Consume four symbols -> up to three output bytes.
        a = alphabet.index(txt[n]); n += 1
        b = alphabet.index(txt[n]); n += 1
        c = alphabet.index(txt[n]); n += 1
        d = alphabet.index(txt[n]); n += 1

        first = a << 2 | b >> 4
        second = (b & 15) << 4 | c >> 2
        third = (c & 3) << 6 | d

        out += chr(first)
        if c != 64:  # 64 == padding
            out += chr(second)
        if d != 64:
            out += chr(third)

    return urllib.unquote(out)
|
||||
|
||||
0
channels/__init__.py
Normal file → Executable file
0
channels/__init__.py
Normal file → Executable file
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "absoluporn",
|
||||
"name": "absoluporn",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.absoluporn.es/image/deco/logo.gif",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'http://www.absoluporn.es'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/wall-date-1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas valorados" , action="lista", url=host + "/wall-note-1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/wall-main-1.html"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="lista", url=host + "/wall-time-1.html"))
|
||||
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search-%s-1.html" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = ' <a href="([^"]+)" class="link1">([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedurl = scrapedurl.replace(".html", "_date.html")
|
||||
scrapedurl = host +"/" + scrapedurl
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail , plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>|<br/>", "", data)
|
||||
patron = '<div class="thumb-main-titre"><a href="([^"]+)".*?'
|
||||
patron += 'title="([^"]+)".*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += '<div class="time">(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fanart=thumbnail, contentTitle = scrapedtitle))
|
||||
next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
|
||||
if next_page:
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
|
||||
url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = 'servervideo = \'([^\']+)\'.*?'
|
||||
patron += 'path = \'([^\']+)\'.*?'
|
||||
patron += 'filee = \'([^\']+)\'.*?'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for servervideo,path,filee in matches:
|
||||
scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
|
||||
return itemlist
|
||||
|
||||
11
channels/accuradio.json
Executable file
11
channels/accuradio.json
Executable file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"id": "accuradio",
|
||||
"name": "AccuRadio",
|
||||
"active": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "accuradio.png",
|
||||
"banner": "accuradio.png",
|
||||
"categories": ["music"],
|
||||
"not_active":["include_in_global_search"],
|
||||
"settings" :[]
|
||||
}
|
||||
88
channels/accuradio.py
Executable file
88
channels/accuradio.py
Executable file
@@ -0,0 +1,88 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per accuradio
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import random
|
||||
from core import httptools, support
|
||||
from platformcode import logger
|
||||
|
||||
host = 'https://www.accuradio.com'
|
||||
api_url = host + '/c/m/json/{}/'
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: featured stations, then the brand list, from the JSON
    API's 'brands' endpoint, plus a search entry."""
    itemlist = []
    item.action = 'peliculas'
    js = httptools.downloadpage(api_url.format('brands')).json

    # Featured entries render italic, brands render as bold bullets.
    for key, style in (('features', 'italic'), ('brands', 'bullet bold')):
        for entry in js.get(key, []):
            itemlist.append(item.clone(
                url='{}/{}'.format(host, entry.get('canonical_url', '')),
                title=support.typo(entry['name'], style) + support.typo(entry.get('channels', ''), '_ [] color kod'),
            ))

    itemlist.append(item.clone(title=support.typo('Cerca...', 'bold color kod'), action='search', thumbnail=support.thumb('search')))
    support.channel_config(item, itemlist)
    return itemlist
|
||||
|
||||
|
||||
@support.scrape
def peliculas(item):
    """Station listing; `action` and `patron` are read from locals() by
    @support.scrape, so the names are fixed."""
    # debug = True
    action = 'playradio'
    patron = r'data-id="(?P<id>[^"]+)"\s*data-oldid="(?P<oldid>[^"]+)".*?data-name="(?P<title>[^"]+)(?:[^>]+>){2}<img src="(?P<thumb>[^"]+)(?:[^>]+>){16}\s*(?P<plot>[^<]+)'
    return locals()
|
||||
|
||||
|
||||
def playradio(item):
    """Fetch the station's playlist from the AccuRadio JSON API and play it
    as a Kodi music playlist.

    FIX: the original rebound the name `item` to the xbmcgui.ListItem built
    in the loop, shadowing the channel Item parameter — harmless today only
    because `item` is not read after the loop, but a trap for any future
    edit. The ListItem now has its own name.
    """
    import xbmcgui, xbmc
    tracks = httptools.downloadpage('{}/playlist/json/{}/?ando={}&rand={}'.format(host, item.id, item.oldid, random.random())).json
    playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
    playlist.clear()
    for track in tracks:
        if 'id' in track:  # skip non-track payload entries
            url = track['primary'] + track['fn'] + '.m4a'
            title = track['title']
            artist = track['track_artist']
            album = track['album']['title']
            year = track['album']['year']
            thumb = 'https://www.accuradio.com/static/images/covers300' + track['album']['cdcover']
            duration = track.get('duration', 0)
            info = {'duration': duration,
                    'album': album,
                    'artist': artist,
                    'title': title,
                    'year': year,
                    'mediatype': 'music'}
            listitem = xbmcgui.ListItem(title, path=url)
            listitem.setArt({'thumb': thumb, 'poster': thumb, 'icon': thumb})
            listitem.setInfo('music', info)
            playlist.add(url, listitem)
    xbmc.Player().play(playlist)
|
||||
|
||||
|
||||
def search(item, text):
    """Search stations and artists; artist hits come first, then the
    regular station listing parsed from the same page.

    Errors are logged and swallowed so the global search keeps running.
    """
    support.info(text)
    item.url = host + '/search/' + text
    itemlist = []
    try:
        data = support.match(item.url).data
        artists = support.match(data, patronBlock=r'artistResults(.*?)</ul', patron=r'href="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)').matches
        for url, artist in (artists or []):
            itemlist.append(item.clone(title=support.typo(artist, 'bullet bold'), thumbnail=support.thumb('music'), url=host + url, action='peliculas'))
        # Reuse the already-downloaded page for the station scrape.
        item.data = data
        itemlist += peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
    return itemlist
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"id": "alsoporn",
|
||||
"name": "alsoporn",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://alsoporn.com/images/alsoporn.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'http://www.alsoporn.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
# itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/en/g/All/new/1"))
|
||||
itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/g/All/top/1"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/=%s/" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)">.*?'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)" />'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return sorted(itemlist, key=lambda i: i.title)
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<div class="alsoporn_prev">.*?'
|
||||
patron += '<a href="([^"]+)">.*?'
|
||||
patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
|
||||
patron += '<span>([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
|
||||
fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
|
||||
|
||||
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
|
||||
data = httptools.downloadpage(item.url).data
|
||||
scrapedurl1 = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
|
||||
scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.trinitytube.xyz/ec/i2.php?")
|
||||
data = httptools.downloadpage(item.url).data
|
||||
scrapedurl2 = scrapertools.find_single_match(data,'<source src="(.*?)"')
|
||||
itemlist.append(item.clone(action="play", title=item.title, fulltitle = item.title, url=scrapedurl2))
|
||||
return itemlist
|
||||
|
||||
11
channels/altadefinizione.json
Executable file
11
channels/altadefinizione.json
Executable file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"id": "altadefinizione",
|
||||
"name": "Altadefinizione",
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": true,
|
||||
"thumbnail": "altadefinizione.png",
|
||||
"banner": "altadefinizione.png",
|
||||
"categories": ["movie", "tvshow", "vos"],
|
||||
"settings": [],
|
||||
"not_active": ["include_in_newest"]
|
||||
}
|
||||
136
channels/altadefinizione.py
Executable file
136
channels/altadefinizione.py
Executable file
@@ -0,0 +1,136 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per altadefinizione
|
||||
# ------------------------------------------------------------
|
||||
|
||||
|
||||
from core import httptools, support, tmdb, scrapertools
|
||||
from platformcode import config, logger
|
||||
import re
|
||||
|
||||
def findhost(url):
|
||||
host = support.match(url, patron=r'<h2[^>]+><a href="([^"]+)').match.rstrip('/')
|
||||
permUrl = httptools.downloadpage(host, follow_redirects=False, only_headers=True).headers
|
||||
|
||||
if 'location' in permUrl.keys(): # handle redirection
|
||||
return permUrl['location']
|
||||
return host
|
||||
|
||||
host = config.get_channel_url(findhost)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
menu = [('Film',['/category/film/', 'peliculas', '', 'movie']),
|
||||
('Film al cinema {submenu}',['/category/ora-al-cinema/', 'peliculas', '', 'movie']),
|
||||
('Generi',['', 'genres', '', 'undefined']),
|
||||
('Saghe',['', 'genres', 'saghe', 'undefined']),
|
||||
('Serie TV',['/category/serie-tv/', 'peliculas', '', 'tvshow']),
|
||||
#('Aggiornamenti Serie TV', ['/aggiornamenti-serie-tv/', 'peliculas']) da fixare
|
||||
]
|
||||
search = ''
|
||||
return locals()
|
||||
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
action = 'peliculas'
|
||||
blacklist = ['Scegli il Genere', 'Film', 'Serie Tv', 'Sub-Ita', 'Anime', "Non reperibile", 'Anime Sub-ITA', 'Prossimamente',]
|
||||
wantSaga = True if item.args == 'saghe' else False
|
||||
|
||||
patronBlock = r'<div class=\"categories-buttons-container\"(?P<block>.*?)</div>'
|
||||
if not wantSaga: # se non richiedo le sage carico le icone in automatico
|
||||
patronMenu = r'<a href=\"(?P<url>https:\/\/.*?)\".*?>(?P<title>.*?)</a>'
|
||||
else: # mantengo l'icona del padre
|
||||
patron = r'<a href=\"(?P<url>https:\/\/.*?)\".*?>(?P<title>.*?)</a>'
|
||||
|
||||
def itemlistHook(itemlist):
|
||||
itl = []
|
||||
for item in itemlist:
|
||||
isSaga = item.fulltitle.startswith('Saga')
|
||||
|
||||
if len(item.fulltitle) != 3:
|
||||
if (isSaga and wantSaga) or (not isSaga and not wantSaga):
|
||||
itl.append(item)
|
||||
return itl
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, text):
|
||||
item.url = "{}/?{}".format(host, support.urlencode({'s': text}))
|
||||
item.args = 'search'
|
||||
|
||||
try:
|
||||
return peliculas(item)
|
||||
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("search except: %s" % line)
|
||||
return []
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
if not item.args == 'search': # pagination not works
|
||||
if not item.nextpage:
|
||||
item.page = 1
|
||||
else:
|
||||
item.page = item.nextpage
|
||||
|
||||
if not item.parent_url:
|
||||
item.parent_url = item.url
|
||||
|
||||
item.nextpage = item.page + 1
|
||||
nextPageUrl = "{}/page/{}".format(item.parent_url, item.nextpage)
|
||||
resp = httptools.downloadpage(nextPageUrl, only_headers = True)
|
||||
if (resp.code > 399): # no more elements
|
||||
nextPageUrl = ''
|
||||
else:
|
||||
action = 'check'
|
||||
|
||||
patron= r'<article class=\"elementor-post.*?(<img .*?src=\"(?P<thumb>[^\"]+).*?)?<h1 class=\"elementor-post__title\".*?<a href=\"(?P<url>[^\"]+)\" >\s*(?P<title>[^<]+?)\s*(\((?P<lang>Sub-[a-zA-Z]+)*\))?\s*(\[(?P<quality>[A-Z]*)\])?\s*(\((?P<year>[0-9]{4})\))?\s+<'
|
||||
|
||||
return locals()
|
||||
|
||||
def episodios(item):
|
||||
item.quality = ''
|
||||
data = item.data if item.data else httptools.downloadpage(item.url).data
|
||||
itemlist = []
|
||||
|
||||
for it in support.match(data, patron=[r'div class=\"single-season.*?(?P<id>season_[0-9]+).*?>Stagione:\s(?P<season>[0-9]+).*?(\s-\s(?P<lang>[a-zA-z]+?))?<']).matches:
|
||||
block = support.match(data, patron = r'div id=\"'+ it[0] +'\".*?</div').match
|
||||
for ep in support.match(block, patron=[r'<li><a href=\"(?P<url>[^\"]+).*?img\" src=\"(?P<thumb>[^\"]+).*?title\">(?P<episode>[0-9]+)\.\s+(?P<title>.*?)</span>']).matches:
|
||||
itemlist.append(item.clone(contentType = 'episode',
|
||||
action='findvideos',
|
||||
thumb = ep[1],
|
||||
title = support.format_longtitle(support.cleantitle(ep[3]), season = it[1], episode = ep[2], lang= it[3]),
|
||||
url = ep[0], data = '')
|
||||
)
|
||||
|
||||
support.check_trakt(itemlist)
|
||||
support.videolibrary(itemlist, item)
|
||||
if (config.get_setting('downloadenabled')):
|
||||
support.download(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
def check(item):
|
||||
item.data = httptools.downloadpage(item.url).data
|
||||
if 'season-details' in item.data.lower():
|
||||
item.contentType = 'tvshow'
|
||||
return episodios(item)
|
||||
else:
|
||||
return findvideos(item)
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
video_url = item.url
|
||||
|
||||
if item.contentType == 'movie':
|
||||
video_url = support.match(item, patron=[r'<div class="video-wrapper">.*?<iframe src=\"(https://.*?)\"',
|
||||
r'window.open\(\'([^\']+).*?_blank']).match
|
||||
if (video_url == ''):
|
||||
return []
|
||||
itemlist = [item.clone(action="play", url=srv) for srv in support.match(video_url, patron='<div class="megaButton" meta-type="v" meta-link="([^"]+).*?(?=>)>').matches]
|
||||
itemlist = support.server(item,itemlist=itemlist)
|
||||
|
||||
return itemlist
|
||||
62
channels/altadefinizione01.json
Normal file → Executable file
62
channels/altadefinizione01.json
Normal file → Executable file
@@ -1,62 +1,10 @@
|
||||
{
|
||||
"id": "altadefinizione01",
|
||||
"name": "Altadefinizione01",
|
||||
"language": ["ita"],
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
|
||||
"categories": ["movie"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in Ricerca Globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
"thumbnail": "altadefinizione01.png",
|
||||
"banner": "altadefinizione01.png",
|
||||
"categories": ["movie", "vos"],
|
||||
"settings": []
|
||||
}
|
||||
|
||||
246
channels/altadefinizione01.py
Normal file → Executable file
246
channels/altadefinizione01.py
Normal file → Executable file
@@ -2,72 +2,137 @@
|
||||
# ------------------------------------------------------------
|
||||
# Canale per altadefinizione01
|
||||
# ------------------------------------------------------------
|
||||
|
||||
from core import servertools, httptools, tmdb, scrapertoolsV2, support
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
|
||||
#URL che reindirizza sempre al dominio corrente
|
||||
#host = "https://altadefinizione01.to"
|
||||
|
||||
__channel__ = "altadefinizione01"
|
||||
host = config.get_channel_url(__channel__)
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'rapidvideo', 'streamcherry', 'megadrive']
|
||||
list_quality = ['default']
|
||||
|
||||
checklinks = config.get_setting('checklinks', 'altadefinizione01')
|
||||
checklinks_number = config.get_setting('checklinks_number', 'altadefinizione01')
|
||||
|
||||
headers = [['Referer', host]]
|
||||
blacklist_categorie = ['Altadefinizione01', 'Altadefinizione.to']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
support.log()
|
||||
|
||||
itemlist =[]
|
||||
|
||||
support.menu(itemlist, 'Al Cinema','peliculas',host+'/cinema/')
|
||||
support.menu(itemlist, 'Ultimi Film Inseriti','peliculas',host)
|
||||
support.menu(itemlist, 'Film Sub-ITA','peliculas',host+'/sub-ita/')
|
||||
support.menu(itemlist, 'Film Ordine Alfabetico ','AZlist',host+'/catalog/')
|
||||
support.menu(itemlist, 'Categorie Film','categories',host)
|
||||
support.menu(itemlist, 'Cerca...','search')
|
||||
"""
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
Eccezioni note che non superano il test del canale:
|
||||
|
||||
return itemlist
|
||||
Avvisi:
|
||||
- L'url si prende da questo file.
|
||||
- è presente nelle novità-> Film.
|
||||
|
||||
Ulteriori info:
|
||||
|
||||
"""
|
||||
from core import scrapertools, httptools, support
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
|
||||
def categories(item):
|
||||
support.log(item)
|
||||
itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas')
|
||||
return support.thumb(itemlist)
|
||||
# def findhost(url):
|
||||
# data = httptools.downloadpage(url).data
|
||||
# host = scrapertools.find_single_match(data, '<div class="elementor-button-wrapper"> <a href="([^"]+)"')
|
||||
# return host
|
||||
|
||||
def AZlist(item):
|
||||
support.log()
|
||||
return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list')
|
||||
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
|
||||
film = [
|
||||
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
|
||||
('Ultimi Aggiornati-Aggiunti', ['','peliculas', 'update']),
|
||||
('Generi', ['', 'genres', 'genres']),
|
||||
('Lettera', ['/catalog/a/', 'genres', 'orderalf']),
|
||||
('Anni', ['', 'genres', 'years']),
|
||||
('Sub-ITA', ['/sub-ita/', 'peliculas', 'pellicola'])
|
||||
]
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
support.info('peliculas', item)
|
||||
|
||||
## deflang = 'ITA'
|
||||
action="findvideos"
|
||||
|
||||
patron = r'<div class="cover boxcaption"> +<h2>\s*<a href="(?P<url>[^"]+)">(?P<title>[^<]+).*?src="(?P<thumb>[^"]+).*?<div class="trdublaj">(?P<quality>[^<]+).*?<span class="ml-label">(?P<year>[0-9]+).*?<span class="ml-label">(?P<duration>[^<]+).*?<p>(?P<plot>[^<]+)'
|
||||
patronNext = '<span>\d</span> <a href="([^"]+)">'
|
||||
|
||||
if item.args == "search":
|
||||
patronBlock = r'</script> <div class="boxgrid caption">(?P<block>.*)<div id="right_bar">'
|
||||
elif item.args == 'update':
|
||||
patronBlock = r'<div class="widget-title">Ultimi Film Aggiunti/Aggiornati</div>(?P<block>.*?)<div id="alt_menu">'
|
||||
patron = r'style="background-image:url\((?P<thumb>[^\)]+).+?<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> [^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+|N/A)?.+?>.*?(?:>Film (?P<lang>Sub ITA)</a></p> )?<p>(?P<plot>[^<]+)<.*?href="(?P<url>[^"]+)'
|
||||
patronNext = '' # non ha nessuna paginazione
|
||||
elif item.args == 'orderalf':
|
||||
patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"' \
|
||||
'.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<' \
|
||||
'[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
|
||||
else:
|
||||
patronBlock = r'<div class="cover_kapsul ml-mask">(?P<block>.*)<div class="page_nav">'
|
||||
|
||||
# debug = True
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
support.info('genres',item)
|
||||
action = "peliculas"
|
||||
|
||||
blacklist = ['Altadefinizione01']
|
||||
if item.args == 'genres':
|
||||
patronBlock = r'<ul class="kategori_list">(?P<block>.*?)<div class="tab-pane fade" id="wtab2">'
|
||||
patronMenu = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
|
||||
elif item.args == 'years':
|
||||
patronBlock = r'<ul class="anno_list">(?P<block>.*?)</li> </ul> </div>'
|
||||
patronMenu = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
|
||||
elif item.args == 'orderalf':
|
||||
patronBlock = r'<div class="movies-letter">(?P<block>.*?)<div class="clearfix">'
|
||||
patronMenu = '<a title=.*?href="(?P<url>[^"]+)"><span>(?P<title>.*?)</span>'
|
||||
|
||||
#debug = True
|
||||
return locals()
|
||||
|
||||
@support.scrape
|
||||
def orderalf(item):
|
||||
support.info('orderalf',item)
|
||||
|
||||
action = 'findvideos'
|
||||
patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
|
||||
'.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<'\
|
||||
'[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
|
||||
patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, text):
|
||||
support.info(item, text)
|
||||
|
||||
|
||||
itemlist = []
|
||||
text = text.replace(" ", "+")
|
||||
item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text)
|
||||
item.args = "search"
|
||||
try:
|
||||
return peliculas(item)
|
||||
# Cattura la eccezione così non interrompe la ricerca globle se il canale si rompe!
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("search except: %s" % line)
|
||||
return []
|
||||
|
||||
def newest(categoria):
|
||||
# import web_pdb; web_pdb.set_trace()
|
||||
support.log(categoria)
|
||||
support.info(categoria)
|
||||
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "peliculas":
|
||||
item.url = host
|
||||
item.action = "peliculas"
|
||||
item.contentType = 'movie'
|
||||
itemlist = peliculas(item)
|
||||
|
||||
if itemlist[-1].action == "peliculas":
|
||||
itemlist.pop()
|
||||
# Continua la ricerca in caso di errore
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -77,77 +142,16 @@ def newest(categoria):
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
support.log(texto)
|
||||
item.url = "%s/index.php?do=search&story=%s&subaction=search" % (
|
||||
host, texto)
|
||||
try:
|
||||
if item.extra == "movie":
|
||||
return subIta(item)
|
||||
if item.extra == "tvshow":
|
||||
return peliculas_tv(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
support.log()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
|
||||
matches = scrapertoolsV2.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
|
||||
info = scrapertoolsV2.find_multiple_matches(data, r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')
|
||||
infoLabels = {}
|
||||
for infoLabels['year'], duration, scrapedplot, checkUrl in info:
|
||||
if checkUrl == scrapedurl:
|
||||
break
|
||||
|
||||
infoLabels['duration'] = int(duration.replace(' min', '')) * 60 # calcolo la durata in secondi
|
||||
scrapedthumbnail = host + scrapedthumbnail
|
||||
scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
|
||||
fulltitle = scrapedtitle
|
||||
if subDiv:
|
||||
fulltitle += support.typo(subText + ' _ () color limegreen')
|
||||
fulltitle += support.typo(scrapedquality.strip()+ ' _ [] color kod')
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType=item.contenType,
|
||||
contentTitle=scrapedtitle,
|
||||
contentQuality=scrapedquality.strip(),
|
||||
plot=scrapedplot,
|
||||
title=fulltitle,
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
infoLabels=infoLabels,
|
||||
thumbnail=scrapedthumbnail))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
|
||||
|
||||
return itemlist
|
||||
|
||||
def peliculas_list(item):
|
||||
support.log()
|
||||
item.fulltitle = ''
|
||||
block = r'<tbody>(.*)<\/tbody>'
|
||||
patron = r'<a href="([^"]+)" title="([^"]+)".*?> <img.*?src="([^"]+)".*?<td class="mlnh-3">([0-9]{4}).*?mlnh-4">([A-Z]+)'
|
||||
return support.scrape(item,patron, ['url', 'title', 'thumb', 'year', 'quality'], patron_block=block)
|
||||
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
support.log()
|
||||
support.info('findvideos', item)
|
||||
data = httptools.downloadpage(item.url).data
|
||||
iframe = support.match(data, patron='src="(http[^"]+)" frameborder=\"0\" allow=\"accelerometer; autoplay;').match
|
||||
if iframe:
|
||||
item.url = iframe
|
||||
return support.server(item)
|
||||
|
||||
itemlist = support.server(item, headers=headers)
|
||||
|
||||
return itemlist
|
||||
# TODO: verificare se si puo' reinsierire il trailer youtube
|
||||
#itemlist = [item.clone(action="play", url=srv[0], quality=srv[1]) for srv in support.match(item, patron='<a href="#" data-link="([^"]+).*?<span class="d">([^<]+)').matches]
|
||||
#itemlist = support.server(item, itemlist=itemlist, headers=headers)
|
||||
|
||||
#return itemlist
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
{
|
||||
"id": "altadefinizione01_club",
|
||||
"name": "Altadefinizione01 C",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"fanart": "https://www.altadefinizione01.vision/templates/Darktemplate/images/logo.png",
|
||||
"thumbnail": "https://www.altadefinizione01.vision/templates/Darktemplate/images/logo.png",
|
||||
"banner": "https://www.altadefinizione01.vision/templates/Darktemplate/images/logo.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Cerca informazioni extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,269 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel Altadefinizione01C Film -*-
|
||||
# -*- Riscritto per KOD -*-
|
||||
# -*- By Greko -*-
|
||||
# -*- last change: 04/05/2019
|
||||
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import httptools, channeltools, scrapertools, servertools, tmdb, support
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from specials import autoplay, filtertools
|
||||
|
||||
__channel__ = "altadefinizione01_club"
|
||||
host = config.get_channel_url(__channel__)
|
||||
|
||||
# ======== Funzionalità =============================
|
||||
|
||||
checklinks = config.get_setting('checklinks', __channel__)
|
||||
checklinks_number = config.get_setting('checklinks_number', __channel__)
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
parameters = channeltools.get_channel_parameters(__channel__)
|
||||
fanart_host = parameters['fanart']
|
||||
thumbnail_host = parameters['thumbnail']
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['verystream','openload','rapidvideo','streamango'] # per l'autoplay
|
||||
list_quality = ['default'] #'rapidvideo', 'streamango', 'openload', 'streamcherry'] # per l'autoplay
|
||||
|
||||
|
||||
# =========== home menu ===================
|
||||
|
||||
def mainlist(item):
|
||||
"""
|
||||
Creo il menu principale del canale
|
||||
:param item:
|
||||
:return: itemlist []
|
||||
"""
|
||||
logger.info("%s mainlist log: %s" % (__channel__, item))
|
||||
itemlist = []
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
# Menu Principale
|
||||
support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host, args='pellicola')
|
||||
support.menu(itemlist, 'Genere', 'categorie', host, args='genres')
|
||||
support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno','years'])
|
||||
support.menu(itemlist, 'Per lettera', 'categorie', host + '/catalog/a/', args=['Film per Lettera','orderalf'])
|
||||
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/cinema/', args='pellicola')
|
||||
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/sub-ita/', args='pellicola')
|
||||
support.menu(itemlist, 'Cerca film submenu', 'search', host)
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
# ======== def in ordine di menu ===========================
|
||||
# =========== def per vedere la lista dei film =============
|
||||
|
||||
def peliculas(item):
|
||||
logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
|
||||
itemlist = []
|
||||
# scarico la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
# da qui fare le opportuni modifiche
|
||||
if item.args != 'orderalf':
|
||||
if item.args == 'pellicola' or item.args == 'years':
|
||||
bloque = scrapertools.find_single_match(data, '<div class="cover boxcaption">(.*?)<div id="right_bar">')
|
||||
elif item.args == "search":
|
||||
bloque = scrapertools.find_single_match(data, '<div class="cover boxcaption">(.*?)</a>')
|
||||
else:
|
||||
bloque = scrapertools.find_single_match(data, '<div class="cover boxcaption">(.*?)<div class="page_nav">')
|
||||
patron = '<h2>.<a href="(.*?)".*?src="(.*?)".*?class="trdublaj">(.*?)<div class="ml-item-hiden".*?class="h4">(.*?)<.*?label">(.*?)</span'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedimg, scrapedqualang, scrapedtitle, scrapedyear in matches:
|
||||
|
||||
if 'sub ita' in scrapedqualang.lower():
|
||||
scrapedlang = 'Sub-Ita'
|
||||
else:
|
||||
scrapedlang = 'ITA'
|
||||
itemlist.append(Item(
|
||||
channel=item.channel,
|
||||
action="findvideos",
|
||||
contentTitle=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
infoLabels={'year': scrapedyear},
|
||||
contenType="movie",
|
||||
thumbnail=host+scrapedimg,
|
||||
title= "%s [%s]" % (scrapedtitle, scrapedlang),
|
||||
language=scrapedlang
|
||||
))
|
||||
|
||||
# poichè il sito ha l'anno del film con TMDB la ricerca titolo-anno è esatta quindi inutile fare lo scrap delle locandine
|
||||
# e della trama dal sito che a volte toppano
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
|
||||
|
||||
return itemlist
|
||||
|
||||
# =========== def pagina categorie ======================================
|
||||
|
||||
def categorie(item):
|
||||
logger.info("%s mainlist categorie log: %s" % (__channel__, item))
|
||||
itemlist = []
|
||||
# scarico la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# da qui fare le opportuni modifiche
|
||||
if item.args == 'genres':
|
||||
bloque = scrapertools.find_single_match(data, '<ul class="kategori_list">(.*?)</ul>')
|
||||
patron = '<li><a href="/(.*?)">(.*?)</a>'
|
||||
elif item.args[1] == 'years':
|
||||
bloque = scrapertools.find_single_match(data, '<ul class="anno_list">(.*?)</ul>')
|
||||
patron = '<li><a href="/(.*?)">(.*?)</a>'
|
||||
elif item.args[1] == 'orderalf':
|
||||
bloque = scrapertools.find_single_match(data, '<div class="movies-letter">(.*)<div class="clearfix">')
|
||||
patron = '<a title=.*?href="(.*?)"><span>(.*?)</span>'
|
||||
|
||||
matches = scrapertools.find_multiple_matches(bloque, patron)
|
||||
|
||||
for scrapurl, scraptitle in sorted(matches):
|
||||
|
||||
if "01" in scraptitle:
|
||||
continue
|
||||
else:
|
||||
scrapurl = host+scrapurl
|
||||
|
||||
if item.args[1] != 'orderalf': action = "peliculas"
|
||||
else: action = 'orderalf'
|
||||
itemlist.append(Item(
|
||||
channel=item.channel,
|
||||
action= action,
|
||||
title = scraptitle,
|
||||
url= scrapurl,
|
||||
thumbnail = get_thumb(scraptitle, auto = True),
|
||||
extra = item.extra,
|
||||
))
|
||||
|
||||
return itemlist
|
||||
|
||||
# =========== def pagina lista alfabetica ===============================
|
||||
|
||||
def orderalf(item):
|
||||
logger.info("%s mainlist orderalf log: %s" % (__channel__, item))
|
||||
itemlist = []
|
||||
# scarico la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
# da qui fare le opportuni modifiche
|
||||
patron = '<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td' #scrapertools.find_single_match(data, '<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td')
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle, scrapedimg, scrapedyear, scrapedqualang in matches:
|
||||
|
||||
if 'sub ita' in scrapedqualang.lower():
|
||||
scrapedlang = 'Sub-ita'
|
||||
else:
|
||||
scrapedlang = 'ITA'
|
||||
itemlist.append(Item(
|
||||
channel=item.channel,
|
||||
action="findvideos_film",
|
||||
contentTitle=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
infoLabels={'year': scrapedyear},
|
||||
contenType="movie",
|
||||
thumbnail=host+scrapedimg,
|
||||
title = "%s [%s]" % (scrapedtitle, scrapedlang),
|
||||
language=scrapedlang,
|
||||
context="buscar_trailer"
|
||||
))
|
||||
|
||||
# se il sito permette l'estrazione dell'anno del film aggiungere la riga seguente
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
|
||||
|
||||
return itemlist
|
||||
|
||||
# =========== def pagina del film con i server per verderlo =============
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
|
||||
itemlist = []
|
||||
|
||||
# scarico la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
# da qui fare le opportuni modifiche
|
||||
patron = '<a href="#" data-link="(.*?)">'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl in matches:
|
||||
logger.info("altadefinizione01_club scrapedurl log: %s" % scrapedurl)
|
||||
try:
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
for videoitem in itemlist:
|
||||
logger.info("Videoitemlist2: %s" % videoitem)
|
||||
videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)
|
||||
videoitem.show = item.show
|
||||
videoitem.contentTitle = item.contentTitle
|
||||
videoitem.contentType = item.contentType
|
||||
videoitem.channel = item.channel
|
||||
videoitem.year = item.infoLabels['year']
|
||||
videoitem.infoLabels['plot'] = item.infoLabels['plot']
|
||||
except AttributeError:
|
||||
logger.error("data doesn't contain expected URL")
|
||||
|
||||
# Controlla se i link sono validi
|
||||
if checklinks:
|
||||
itemlist = servertools.check_list_links(itemlist, checklinks_number)
|
||||
|
||||
# Requerido para FilterTools
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
# Aggiunge alla videoteca
|
||||
if item.extra != 'findvideos' and item.extra != "library" and config.get_videolibrary_support() and len(itemlist) != 0 :
|
||||
support.videolibrary(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
# =========== def per cercare film/serietv =============
|
||||
#http://altadefinizione01.link/index.php?do=search&story=avatar&subaction=search
|
||||
def search(item, text):
    """Query the site's search endpoint and delegate listing to peliculas().

    The bare except is deliberate: a broken channel must never abort the
    global search, so any failure is logged and an empty list returned.
    """
    logger.info("%s mainlist search log: %s %s" % (__channel__, item, text))
    query = text.replace(" ", "+")
    item.url = host + "/index.php?do=search&story=%s&subaction=search" % (query)
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s Sono qua: %s" % (__channel__, line))
        return []
|
||||
|
||||
# =========== def per le novità nel menu principale =============
|
||||
|
||||
def newest(categoria):
    """Return the newest items for *categoria* (only 'film' is handled).

    :param categoria: category requested by the global "Novità" menu.
    :return: itemlist of fresh items, or [] on any error.
    """
    # BUG FIX: the old format string had four placeholders ("%s ... %s %s %s")
    # but only two values, raising TypeError before anything else ran.
    logger.info("%s mainlist newest log: %s" % (__channel__, categoria))
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)
            # Drop the trailing "next page" item, guarding the empty case.
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Keep the global "newest" scan alive if this channel breaks.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
@@ -1,104 +0,0 @@
|
||||
{
|
||||
"id": "altadefinizione01_link",
|
||||
"name": "Altadefinizione01 L",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"fanart": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
|
||||
"thumbnail": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
|
||||
"banner": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
|
||||
"fix" : "reimpostato url e modificato file per KOD",
|
||||
"change_date": "2019-30-04",
|
||||
"categories": [
|
||||
"movie",
|
||||
"vosi"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "channel_host",
|
||||
"type": "text",
|
||||
"label": "Host del canale",
|
||||
"default": "https://altadefinizione01.estate/",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Cerca informazioni extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"ITA",
|
||||
"vosi"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "profilo dei colori",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 5",
|
||||
"Perfil 4",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,147 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel Altadefinizione01L Film - Serie -*-
|
||||
# -*- By Greko -*-
|
||||
|
||||
import channelselector
|
||||
from specials import autoplay
|
||||
from core import servertools, support, jsontools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
__channel__ = "altadefinizione01_link"
|
||||
|
||||
# ======== def per utility INIZIO ============================
|
||||
|
||||
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
|
||||
list_quality = ['default']
|
||||
|
||||
host = config.get_setting("channel_host", __channel__)
|
||||
|
||||
headers = [['Referer', host]]
|
||||
# =========== home menu ===================
|
||||
|
||||
def mainlist(item):
    """
    Build the channel's main menu.

    :param item: channel Item from the caller.
    :return: itemlist []
    """
    support.log()
    itemlist = []

    # Main menu entries: (label, action, url[, args]); order is the
    # order shown on screen.
    support.menu(itemlist, 'Novità bold', 'peliculas', host)
    support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
    support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
    support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
    support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/film-del-cinema')
    support.menu(itemlist, 'Popolari bold', 'peliculas', host + '/piu-visti.html')
    support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
    support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/film-sub-ita/')
    support.menu(itemlist, 'Cerca film submenu', 'search', host)

    # AutoPlay registration plus its settings entry in the menu.
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    # Channel configuration entry.
    support.channel_config(item, itemlist)

    return itemlist
|
||||
|
||||
# ======== def in ordine di action dal menu ===========================
|
||||
|
||||
def peliculas(item):
    """Scrape a movie-list page and return its items.

    :param item: Item whose url points at a list page.
    :return: itemlist with action='findvideos' plus a next-page item.
    """
    # BUG FIX: 'support.log' was referenced without parentheses, so it was
    # a no-op attribute access and never logged anything.
    support.log()

    # Capture groups, in order: url, thumb, title, year, quality, language.
    patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
             '.*?class="ml-item-title">([^<]+)</.*?class="ml-item-label"> (\d{4}) <'\
             '.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.+?"> '\
             '(.+?) </div>.*?class="ml-item-label"> (.+?) </'
    listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']

    # Paginator: "<span>N</span> <a href=...>" is the next-page link.
    patronNext = '<span>\d</span> <a href="([^"]+)">'

    itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
                              headers=headers, patronNext=patronNext,
                              action='findvideos')

    return itemlist
|
||||
|
||||
# =========== def pagina categorie ======================================
|
||||
|
||||
def genres(item):
    """List the sub-categories selected by item.args.

    args='genres'|'years'|'quality' list category links (action
    'peliculas'); args='lucky' lists the daily random titles and jumps
    straight to 'findvideos'.

    :param item: Item carrying the category selector in item.args.
    :return: itemlist of category (or random title) entries.
    """
    # BUG FIX: 'support.log' was missing its parentheses -> never logged.
    support.log()

    action = 'peliculas'
    if item.args == 'genres':
        bloque = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
    elif item.args == 'years':
        bloque = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
    elif item.args == 'quality':
        bloque = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
    elif item.args == 'lucky':
        # Random titles shown on the page, rotated once per day.
        bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
        action = 'findvideos'
    else:
        # ROBUSTNESS FIX: an unexpected item.args previously left 'bloque'
        # unbound and raised UnboundLocalError below; bail out cleanly.
        support.log('genres: unexpected args %s' % item.args)
        return []

    patron = r'<li><a href="([^"]+)">(.*?)<'
    listGroups = ['url', 'title']
    itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
                              headers=headers, patron_block=bloque,
                              action=action)

    return itemlist
|
||||
|
||||
# =========== def per cercare film/serietv =============
|
||||
#host+/index.php?do=search&story=avatar&subaction=search
|
||||
def search(item, text):
    """Run a site search for *text*; failures are swallowed so the global
    search keeps working even when this channel breaks."""
    support.log()
    item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text.replace(" ", "+"))
    try:
        return peliculas(item)
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.info("%s mainlist search log: %s" % (__channel__, trace_part))
        return []
|
||||
|
||||
# =========== def per le novità nel menu principale =============
|
||||
|
||||
def newest(categoria):
    """Return fresh items for the 'Novità' menu; [] on any error.

    Only categoria == 'peliculas' is handled; anything else yields the
    empty list untouched.
    """
    support.log(categoria)
    itemlist = []
    item = Item()
    try:
        if categoria != "peliculas":
            return itemlist
        item.url = host
        item.action = "peliculas"
        itemlist = peliculas(item)
        # Strip the trailing paginator entry; an IndexError on an empty
        # list falls through to the handler below, returning [].
        if itemlist[-1].action == "peliculas":
            itemlist.pop()
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error("{0}".format(trace_part))
        return []

    return itemlist
|
||||
|
||||
def findvideos(item):
    """Resolve the server links for *item* and register AutoPlay."""
    support.log()

    # Server extraction is fully delegated to the support helper.
    results = support.server(item, headers=headers)

    # Required by AutoPlay. (FilterTools is intentionally not applied
    # in this channel.)
    autoplay.start(results, item)

    return results
|
||||
81
channels/altadefinizioneclick.json
Normal file → Executable file
81
channels/altadefinizioneclick.json
Normal file → Executable file
@@ -1,78 +1,11 @@
|
||||
{
|
||||
"id": "altadefinizioneclick",
|
||||
"name": "AltadefinizioneClick",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneclick.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneciclk.png",
|
||||
"categories": ["movie","vosi"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "channel_host",
|
||||
"type": "text",
|
||||
"label": "Host del canale",
|
||||
"default": "https://altadefinizione.cloud",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
"active": false,
|
||||
"language": ["ita","sub-ita"],
|
||||
"thumbnail": "altadefinizioneclick.png",
|
||||
"bannermenu": "altadefinizioneciclk.png",
|
||||
"categories": ["tvshow","movie","vos"],
|
||||
"not_active":["include_in_newest_series"],
|
||||
"settings": []
|
||||
}
|
||||
|
||||
195
channels/altadefinizioneclick.py
Normal file → Executable file
195
channels/altadefinizioneclick.py
Normal file → Executable file
@@ -1,112 +1,151 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per altadefinizioneclick
|
||||
# Canale per Altadefinizione Click
|
||||
# ----------------------------------------------------------
|
||||
|
||||
import re
|
||||
from core import support
|
||||
from platformcode import config, logger
|
||||
|
||||
from core import servertools, support
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
def findhost(url):
|
||||
return support.match(url, patron=r'<div class="elementor-button-wrapper">\s*<a href="([^"]+)"').match
|
||||
|
||||
#host = config.get_setting("channel_host", 'altadefinizioneclick')
|
||||
__channel__ = 'altadefinizioneclick'
|
||||
host = config.get_channel_url(__channel__)
|
||||
host = config.get_channel_url(findhost)
|
||||
if host.endswith('/'):
|
||||
host = host[:-1]
|
||||
headers = {'Referer': host, 'x-requested-with': 'XMLHttpRequest'}
|
||||
order = ['', 'i_piu_visti', 'i_piu_votati', 'i_piu_votati_dellultimo_mese', 'titolo_az', 'voto_imdb_piu_alto'][config.get_setting('order', 'altadefinizionecommunity')]
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['verystream', 'openload', 'streamango', "vidoza", "thevideo", "okru", 'youtube']
|
||||
list_quality = ['1080p']
|
||||
|
||||
checklinks = config.get_setting('checklinks', 'altadefinizioneclick')
|
||||
checklinks_number = config.get_setting('checklinks_number', 'altadefinizioneclick')
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
support.log()
|
||||
itemlist = []
|
||||
logger.debug(item)
|
||||
|
||||
support.menu(itemlist, 'Film', 'peliculas', host + "/nuove-uscite/")
|
||||
support.menu(itemlist, 'Per Genere submenu', 'menu', host, args='Film')
|
||||
support.menu(itemlist, 'Per Anno submenu', 'menu', host, args='Anno')
|
||||
support.menu(itemlist, 'Sub-ITA', 'peliculas', host + "/sub-ita/")
|
||||
support.menu(itemlist, 'Cerca...', 'search', host, 'movie')
|
||||
support.aplay(item, itemlist,list_servers, list_quality)
|
||||
support.channel_config(item, itemlist)
|
||||
film = ['/type/movie',
|
||||
('Generi', ['/type/movie', 'genres', 'genres']),
|
||||
('Anni', ['/type/movie', 'genres', 'year']),]
|
||||
|
||||
return itemlist
|
||||
tvshow = ['/serie-tv/tvshow',
|
||||
('Generi', ['/serie-tv/tvshow', 'genres', 'genres']),
|
||||
('Anni', ['/serie-tv/tvshow', 'genres', 'year'])]
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
support.log("search ", texto)
|
||||
|
||||
item.extra = 'search'
|
||||
item.url = host + "/?s=" + texto
|
||||
logger.debug("search ", texto)
|
||||
|
||||
item.args = 'search'
|
||||
item.url = host + "/search?s={}&f={}&page=1".format(texto, item.contentType)
|
||||
try:
|
||||
return peliculas(item)
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
support.logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
def newest(categoria):
|
||||
support.log(categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "peliculas":
|
||||
item.url = host + "/nuove-uscite/"
|
||||
item.action = "peliculas"
|
||||
itemlist = peliculas(item)
|
||||
|
||||
if itemlist[-1].action == "peliculas":
|
||||
itemlist.pop()
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
logger.debug(item)
|
||||
data = support.httptools.downloadpage(item.url, cloudscraper=True).data
|
||||
blacklist= ['Film', 'Serie TV']
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
if item.args == 'genres':
|
||||
categories ={}
|
||||
res = support.match(host + '/cerca', patron=r'for="cat-(\d+)[^>]+>([^<]+)').matches
|
||||
for _id, name in res:
|
||||
categories[name] = _id
|
||||
|
||||
return itemlist
|
||||
patronBlock = r'{}<span></span>(?P<block>.*?)</ul>\s*</li'.format('Film' if item.contentType == 'movie' else 'Serie TV')
|
||||
patronMenu = r'<a href="[^"]+">(?P<title>[^<]+)'
|
||||
|
||||
def itemHook(it):
|
||||
it.cat_id = categories[it.fulltitle]
|
||||
return it
|
||||
|
||||
if item.args == 'year':
|
||||
patron = r'value="(?P<year_id>[^"]+)"[^>]*>(?P<title>\d+)'
|
||||
patronBlock = r'Anno</option>(?P<block>.*?</select>)'
|
||||
|
||||
elif item.args == 'quality':
|
||||
patronMenu = r'quality/(?P<quality_id>[^"]+)">(?P<title>[^<]+)'
|
||||
patronBlock = r'Risoluzione(?P<block>.*?)</ul>'
|
||||
action = 'peliculas'
|
||||
return locals()
|
||||
|
||||
|
||||
def menu(item):
|
||||
support.log()
|
||||
itemlist = support.scrape(item, '<li><a href="([^"]+)">([^<]+)</a></li>', ['url', 'title'], headers, patron_block='<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>', action='peliculas')
|
||||
return support.thumb(itemlist)
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
support.log()
|
||||
if item.extra == 'search':
|
||||
patron = r'<a href="([^"]+)">\s*<div class="wrapperImage">(?:<span class="hd">([^<]+)<\/span>)?<img[^s]+src="([^"]+)"[^>]+>[^>]+>[^>]+>([^<]+)<[^<]+>(?:.*?IMDB:\s([^<]+)<\/div>)?'
|
||||
elements = ['url', 'quality', 'thumb', 'title', 'rating']
|
||||
|
||||
item.quality = 'HD'
|
||||
json = {}
|
||||
params ={'type':item.contentType, 'anno':item.year_id, 'quality':item.quality_id, 'cat':item.cat_id, 'order':order}
|
||||
|
||||
|
||||
if item.contentType == 'movie':
|
||||
action = 'findvideos'
|
||||
else:
|
||||
patron = r'<img width[^s]+src="([^"]+)[^>]+><\/a>.*?<a href="([^"]+)">([^(?:\]|<)]+)(?:\[([^\]]+)\])?<\/a>[^>]+>[^>]+>[^>]+>(?:\sIMDB\:\s([^<]+)<)?(?:.*?<span class="hd">([^<]+)<\/span>)?\s*<a'
|
||||
elements =['thumb', 'url', 'title','lang', 'rating', 'quality']
|
||||
itemlist = support.scrape(item, patron, elements, headers, patronNext='<a class="next page-numbers" href="([^"]+)">')
|
||||
return itemlist
|
||||
action = 'episodios'
|
||||
if not item.page: item.page = 1
|
||||
try:
|
||||
# support.dbg()
|
||||
if item.args in ['search']:
|
||||
page = support.httptools.downloadpage(item.url, headers=headers)
|
||||
if page.json:
|
||||
data = "\n".join(page.json['data'])
|
||||
else:
|
||||
data = page.data
|
||||
else:
|
||||
params['page'] = item.page
|
||||
|
||||
url = '{}/load-more-film?{}'.format(host, support.urlencode(params))
|
||||
json = support.httptools.downloadpage(url, headers=headers).json
|
||||
data = "\n".join(json['data'])
|
||||
except:
|
||||
data = ' '
|
||||
|
||||
patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){3}(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'
|
||||
# patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){2}(?P<quality>[^<]+)(?:[^>]+>){2}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){3}(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'
|
||||
|
||||
# paginazione
|
||||
if json.get('have_next') or 'have_next_film=true' in data:
|
||||
def fullItemlistHook(itemlist):
|
||||
cat_id = support.match(data, patron=r''''cat':"(\d+)"''').match
|
||||
if cat_id: item.cat_id = cat_id
|
||||
item.page += 1
|
||||
support.nextPage(itemlist, item, function_or_level='peliculas')
|
||||
return itemlist
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
logger.debug(item)
|
||||
# debug = True
|
||||
data = item.data
|
||||
patron = r'class="playtvshow "\s+data-href="(?P<url>[^"]+)'
|
||||
|
||||
def itemHook(it):
|
||||
spl = it.url.split('/')[-2:]
|
||||
it.infoLabels['season'] = int(spl[0])+1
|
||||
it.infoLabels['episode'] = int(spl[1])+1
|
||||
it.url = it.url.replace('/watch-unsubscribed', '/watch-external')
|
||||
it.title = '{}x{:02d} - {}'.format(it.contentSeason, it.contentEpisodeNumber, it.fulltitle)
|
||||
return it
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
support.log()
|
||||
itemlist = []
|
||||
playWindow = support.match(item, patron='(?:playWindow|iframe)" (?:href|src)="([^"]+)').match
|
||||
if host in playWindow:
|
||||
url = support.match(playWindow, patron='allowfullscreen[^<]+src="([^"]+)"').match
|
||||
else:
|
||||
url = playWindow
|
||||
itemlist.append(item.clone(action='play', url=url, quality=''))
|
||||
|
||||
itemlist = support.hdpass_get_servers(item)
|
||||
|
||||
if checklinks:
|
||||
itemlist = servertools.check_list_links(itemlist, checklinks_number)
|
||||
|
||||
# itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
support.videolibrary(itemlist, item ,'color kod bold')
|
||||
|
||||
return itemlist
|
||||
return support.server(item, itemlist=itemlist)
|
||||
|
||||
37
channels/altadefinizionecommunity.json
Executable file
37
channels/altadefinizionecommunity.json
Executable file
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"id": "altadefinizionecommunity",
|
||||
"name": "Altadefinizione Community",
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": false,
|
||||
"thumbnail": "altadefinizionecommunity.png",
|
||||
"banner": "",
|
||||
"categories": ["movie", "tvshow", "vos"],
|
||||
"not_active": ["include_in_newest"],
|
||||
"settings": [
|
||||
{
|
||||
"default": "",
|
||||
"enabled": true,
|
||||
"id": "username",
|
||||
"label": "username",
|
||||
"type": "text",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"default": "",
|
||||
"enabled": true,
|
||||
"id": "password",
|
||||
"label": "password",
|
||||
"type": "text",
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "order",
|
||||
"type": "list",
|
||||
"label": "Ordine di Visualizzazione",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [ "Nessuno", "I più visti", "I più votati", "I più votati dell'ultimo mese", "Titolo A-Z", "Voto IMDB più alto"]
|
||||
}
|
||||
]
|
||||
}
|
||||
273
channels/altadefinizionecommunity.py
Executable file
273
channels/altadefinizionecommunity.py
Executable file
@@ -0,0 +1,273 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per Altadefinizione Community
|
||||
|
||||
from core import support
|
||||
from lib.fakeMail import Gmailnator
|
||||
from platformcode import config, platformtools, logger
|
||||
from core import scrapertools, httptools
|
||||
|
||||
|
||||
def findhost(url):
    """Extract the current channel host from the landing page's button link."""
    matched = support.match(url, patron=r'<div class="elementor-button-wrapper">\s*<a href="([^"]+)"')
    return matched.match
|
||||
|
||||
|
||||
# Resolve the channel host dynamically via the landing-page scraper.
host = config.get_channel_url(findhost)
register_url = 'https://altaregistrazione.net'

# If the resolved host doesn't look like the expected domain, force a
# fresh lookup instead of trusting the cached value.
if 'altadefinizionecommunity' not in host:
    config.get_channel_url(findhost, forceFindhost=True)

# Normalise: no trailing slash, so paths can be appended with '+'.
if host.endswith('/'):
    host = host[:-1]

headers = {'Referer': host}
# Map the 'order' setting index onto the site's sort keys ('' = default).
order = ['', 'i_piu_visti', 'i_piu_votati', 'i_piu_votati_dellultimo_mese', 'titolo_az', 'voto_imdb_piu_alto'][config.get_setting('order', 'altadefinizionecommunity')]
|
||||
|
||||
|
||||
@support.menu
def mainlist(item):
    """Main menu. The @support.menu decorator presumably builds the menu
    from the returned locals(): 'film' and 'tvshow' each hold a base url
    followed by (label, [url, action, args]) sub-entries — do not rename
    these locals (TODO confirm against support.menu)."""
    logger.debug(item)

    # Movies: listing plus genre/year submenus.
    film = ['/type/movie',
            ('Generi', ['/type/movie', 'genres', 'genres']),
            ('Anni', ['/type/movie', 'genres', 'year']),]

    # TV shows: same structure under /serie-tv.
    tvshow = ['/serie-tv/tvshow',
              ('Generi', ['/serie-tv/tvshow', 'genres', 'genres']),
              ('Anni', ['/serie-tv/tvshow', 'genres', 'year'])]

    return locals()
|
||||
|
||||
|
||||
def search(item, text):
    """Search the site for *text*; any failure yields an empty list so
    the global search is never interrupted."""
    logger.debug("search ", text)
    # Skip the account dance when invoked by the global search.
    if not item.globalsearch:
        registerOrLogin()

    item.args = 'search'
    query = text.replace(' ', '+')
    item.url = host + "/search?s={}&f={}".format(query, item.contentType)
    try:
        return peliculas(item)
    except:
        import sys
        for trace_part in sys.exc_info():
            support.logger.error("%s" % trace_part)
        return []
|
||||
|
||||
|
||||
@support.scrape
def genres(item):
    """Category menu (genres / years / quality). Decorated by
    @support.scrape, which presumably consumes the returned locals()
    (patronBlock, patronMenu, itemHook, action, blacklist, data) —
    do not rename these locals (TODO confirm against support.scrape)."""
    registerOrLogin()
    logger.debug(item)
    data = support.httptools.downloadpage(item.url).data
    # Section headers that must not appear as selectable categories.
    blacklist= ['Film', 'Serie TV']

    if item.args == 'genres':
        # Map category names -> numeric ids scraped from the search form,
        # so itemHook can attach cat_id to each resulting item.
        categories ={}
        res = support.match(host + '/cerca', patron=r'for="cat-(\d+)[^>]+>([^<]+)').matches
        for _id, name in res:
            categories[name] = _id

        patronBlock = r'{}<span></span>(?P<block>.*?)</ul>\s*</li'.format('Film' if item.contentType == 'movie' else 'Serie TV')
        patronMenu = r'<a href="[^"]+">(?P<title>[^<]+)'

        def itemHook(it):
            it.cat_id = categories[it.fulltitle]
            return it

    if item.args == 'year':
        patronMenu = r'value="(?P<year_id>[^"]+)"[^>]*>(?P<title>\d+)'
        patronBlock = r'Anno</option>(?P<block>.*?</select>)'

    elif item.args == 'quality':
        patronMenu = r'quality/(?P<quality_id>[^"]+)">(?P<title>[^<]+)'
        patronBlock = r'Risoluzione(?P<block>.*?)</ul>'

    action = 'peliculas'
    return locals()
|
||||
|
||||
|
||||
@support.scrape
def peliculas(item):
    """Movie/TV listing. Decorated by @support.scrape, which presumably
    consumes the returned locals() (patron, action, itemHook,
    fullItemlistHook, data) — do not rename these locals (TODO confirm
    against support.scrape)."""
    item.quality = 'HD'
    # Holds the ajax pagination response; stays empty for search results.
    json = {}
    if not item.page: item.page = 1
    params ={'type':item.contentType, 'anno':item.year_id, 'quality':item.quality_id, 'cat':item.cat_id, 'order':order, 'page':item.page}
    # debug = True

    action = 'findvideos' if item.contentType == 'movie' else 'episodios'

    try:
        # support.dbg()
        if item.args in ['search']:
            # Search results: the endpoint may answer JSON or plain HTML.
            page = support.httptools.downloadpage(item.url, headers=headers)
            if page.json:
                data = "\n".join(page.json['data'])
            else:
                data = page.data
        else:
            # Normal listings go through the ajax "load more" endpoint.
            params['page'] = item.page

            url = '{}/load-more-film?{}'.format(host, support.urlencode(params))
            json = support.httptools.downloadpage(url, headers=headers).json
            data = "\n".join(json['data'])
    except:
        # Any download/parse failure: scrape an empty page -> no items.
        data = ' '

    patron = r'wrapFilm"[^>]*>\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){2,6}\s+<h3>(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA -]+))?'
    # patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">[^>]+>(?P<year>\d+)(?:[^>]+>){2}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)(?:[^>]+>){3}(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'

    def itemHook(item):
        # The site labels 2K/4K but only HD streams are served.
        item.quality = item.quality.replace('2K', 'HD').replace('4K', 'HD')
        item.title = item.title.replace('2K', 'HD').replace('4K', 'HD')
        return item

    # Pagination: either the JSON response or an inline JS flag says more
    # pages exist; then append a next-page entry after the item list.
    if json.get('have_next') or support.match(data, patron=r'have_next_film\s*=\s*true').match:
        def fullItemlistHook(itemlist):
            cat_id = support.match(data, patron=r''''cat':"(\d+)"''').match
            if cat_id: item.cat_id = cat_id
            item.page += 1
            support.nextPage(itemlist, item, function_or_level='peliculas')
            return itemlist

    return locals()
|
||||
|
||||
|
||||
@support.scrape
def episodios(item):
    """Episode listing for a TV show. Decorated by @support.scrape, which
    presumably consumes the returned locals() (data, patron, itemHook) —
    do not rename these locals (TODO confirm against support.scrape)."""
    registerOrLogin()
    logger.debug(item)
    # debug = True
    # The episode page was already fetched upstream and stashed on the item.
    data = item.data
    patron = r'class="playtvshow "\s+data-href="(?P<url>[^"]+)'

    def itemHook(it):
        # URLs end in .../<season>/<episode>, both zero-based on the site.
        spl = it.url.split('/')[-2:]
        it.infoLabels['season'] = int(spl[0])+1
        it.infoLabels['episode'] = int(spl[1])+1
        # The external player endpoint works without a subscription.
        it.url = it.url.replace('/watch-unsubscribed', '/watch-external')
        it.title = '{}x{:02d} - {}'.format(it.contentSeason, it.contentEpisodeNumber, it.fulltitle)
        return it

    return locals()
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the watch URL and wrap it as a single playable item."""
    resolve_url(item)

    # Pull the embedded player src from the watch page.
    direct_url = support.match(item.url, patron='allowfullscreen[^<]+src="([^"]+)"', cloudscraper=True).match
    results = [item.clone(action='play', url=direct_url, quality='')]

    return support.server(item, itemlist=results)
|
||||
|
||||
|
||||
def play(item):
    """Play the item: handle the site's own player here, pass every other
    host through to the generic resolver."""
    if host not in item.url:
        return [item]

    # Proprietary server: extract the media url from the jwplayer setup.
    page_data = support.httptools.downloadpage(item.url, cloudscraper=True).data
    return support.get_jwplayer_mediaurl(page_data, 'Diretto')
|
||||
|
||||
|
||||
def resolve_url(item):
    """Make item.url point at the external watch endpoint (mutates item)."""
    registerOrLogin()

    already_resolved = '/watch-unsubscribed' in item.url or '/watch-external' in item.url
    if not already_resolved:
        page_data = support.httptools.downloadpage(item.url, cloudscraper=True).data
        playWindow = support.match(page_data, patron='playWindow" href="([^"]+)')
        item.data = playWindow.data
        # The external endpoint plays without a subscription.
        item.url = playWindow.match.replace('/watch-unsubscribed', '/watch-external')
    return item
|
||||
|
||||
|
||||
def login():
    """Log into the site with the stored credentials.

    :return: True when the page carries the 'id="logged"' marker, else False
             (also shows a dialog on bad credentials).
    """
    r = support.httptools.downloadpage(host, cloudscraper=True)
    Token = support.match(r.data, patron=r'name=\s*"_token"\s*value=\s*"([^"]+)', cloudscraper=True).match
    if 'id="logged"' in r.data:
        logger.info('Già loggato')
    else:
        logger.info('Login in corso')
        # BUG FIX: the CSRF token was scraped into 'Token' above but the
        # form was posted with an empty '_token'; send the scraped value.
        post = {'_token': Token if Token else '',
                'form_action': 'login',
                'email': config.get_setting('username', channel='altadefinizionecommunity'),
                'password': config.get_setting('password', channel='altadefinizionecommunity')}

        r = support.httptools.downloadpage(host + '/login', post=post, headers={'referer': host}, cloudscraper=True)
        if r.code not in [200, 302] or 'Email o Password non validi' in r.data:
            platformtools.dialog_ok('AltadefinizioneCommunity', 'Username/password non validi')
            return False

    return 'id="logged"' in r.data
|
||||
|
||||
|
||||
def registerOrLogin():
|
||||
if config.get_setting('username', channel='altadefinizionecommunity') and config.get_setting('password', channel='altadefinizionecommunity'):
|
||||
if login():
|
||||
return True
|
||||
|
||||
action = platformtools.dialog_yesno('AltadefinizioneCommunity',
|
||||
'Questo server necessita di un account, ne hai già uno oppure vuoi tentare una registrazione automatica?',
|
||||
yeslabel='Accedi', nolabel='Tenta registrazione', customlabel='Annulla')
|
||||
if action == 1: # accedi
|
||||
from specials import setting
|
||||
from core.item import Item
|
||||
user_pre = config.get_setting('username', channel='altadefinizionecommunity')
|
||||
password_pre = config.get_setting('password', channel='altadefinizionecommunity')
|
||||
setting.channel_config(Item(config='altadefinizionecommunity'))
|
||||
user_post = config.get_setting('username', channel='altadefinizionecommunity')
|
||||
password_post = config.get_setting('password', channel='altadefinizionecommunity')
|
||||
|
||||
if user_pre != user_post or password_pre != password_post:
|
||||
return registerOrLogin()
|
||||
else:
|
||||
return []
|
||||
elif action == 0: # tenta registrazione
|
||||
import random
|
||||
import string
|
||||
logger.debug('Registrazione automatica in corso')
|
||||
mailbox = Gmailnator()
|
||||
randPsw = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(10))
|
||||
logger.debug('email: ' + mailbox.address)
|
||||
logger.debug('pass: ' + randPsw)
|
||||
reg = platformtools.dialog_register(register_url, email=True, password=True, email_default=mailbox.address, password_default=randPsw)
|
||||
if not reg:
|
||||
return False
|
||||
regPost = httptools.downloadpage(register_url, post={'email': reg['email'], 'password': reg['password']}, cloudscraper=True)
|
||||
|
||||
if regPost.url == register_url:
|
||||
error = scrapertools.htmlclean(scrapertools.find_single_match(regPost.data, 'Impossibile proseguire.*?</div>'))
|
||||
error = scrapertools.unescape(scrapertools.re.sub('\n\s+', ' ', error))
|
||||
platformtools.dialog_ok('AltadefinizioneCommunity', error)
|
||||
return False
|
||||
if reg['email'] == mailbox.address:
|
||||
if "L'indirizzo email risulta già registrato" in regPost.data:
|
||||
# httptools.downloadpage(baseUrl + '/forgotPassword', post={'email': reg['email']})
|
||||
platformtools.dialog_ok('AltadefinizioneCommunity', 'Indirizzo mail già utilizzato')
|
||||
return False
|
||||
mail = mailbox.waitForMail()
|
||||
if mail:
|
||||
checkUrl = scrapertools.find_single_match(mail.body, '<a href="([^"]+)[^>]+>Verifica').replace(r'\/', '/')
|
||||
logger.debug('CheckURL: ' + checkUrl)
|
||||
httptools.downloadpage(checkUrl, cloudscraper=True)
|
||||
config.set_setting('username', mailbox.address, channel='altadefinizionecommunity')
|
||||
config.set_setting('password', randPsw, channel='altadefinizionecommunity')
|
||||
platformtools.dialog_ok('AltadefinizioneCommunity',
|
||||
'Registrato automaticamente con queste credenziali:\nemail:' + mailbox.address + '\npass: ' + randPsw)
|
||||
else:
|
||||
platformtools.dialog_ok('AltadefinizioneCommunity', 'Impossibile registrarsi automaticamente')
|
||||
return False
|
||||
else:
|
||||
platformtools.dialog_ok('AltadefinizioneCommunity', 'Hai modificato la mail quindi KoD non sarà in grado di effettuare la verifica in autonomia, apri la casella ' + reg['email']
|
||||
+ ' e clicca sul link. Premi ok quando fatto')
|
||||
logger.debug('Registrazione completata')
|
||||
else:
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -1,70 +0,0 @@
|
||||
{
|
||||
"id": "altadefinizionehd",
|
||||
"name": "AltadefinizioneHD",
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",
|
||||
"bannermenu": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",
|
||||
"categories": ["tvshow","movie"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in Ricerca Globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,264 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per Altadefinizione HD
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
|
||||
from channelselector import thumb
|
||||
from core import httptools, scrapertools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
|
||||
__channel__ = 'altadefinizionehd'
|
||||
host = config.get_channel_url(__channel__)
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
list_servers = ['openload']
|
||||
list_quality = ['default']
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("[altadefinizionehd.py] mainlist")
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = [Item(channel=item.channel,
|
||||
action="video",
|
||||
title="[B]Film[/B]",
|
||||
url=host + '/movies/',
|
||||
thumbnail=NovitaThumbnail,
|
||||
fanart=FilmFanart),
|
||||
Item(channel=item.channel,
|
||||
action="menu",
|
||||
title="[B] > Film per Genere[/B]",
|
||||
url=host,
|
||||
extra='GENERE',
|
||||
thumbnail=NovitaThumbnail,
|
||||
fanart=FilmFanart),
|
||||
Item(channel=item.channel,
|
||||
action="menu",
|
||||
title="[B] > Film per Anno[/B]",
|
||||
url=host,
|
||||
extra='ANNO',
|
||||
thumbnail=NovitaThumbnail,
|
||||
fanart=FilmFanart),
|
||||
Item(channel=item.channel,
|
||||
action="video",
|
||||
title="Film Sub-Ita",
|
||||
url=host + "/genre/sub-ita/",
|
||||
thumbnail=NovitaThumbnail,
|
||||
fanart=FilmFanart),
|
||||
Item(channel=item.channel,
|
||||
action="video",
|
||||
title="Film Rip",
|
||||
url=host + "/genre/dvdrip-bdrip-brrip/",
|
||||
thumbnail=NovitaThumbnail,
|
||||
fanart=FilmFanart),
|
||||
Item(channel=item.channel,
|
||||
action="video",
|
||||
title="Film al Cinema",
|
||||
url=host + "/genre/cinema/",
|
||||
thumbnail=NovitaThumbnail,
|
||||
fanart=FilmFanart),
|
||||
Item(channel=item.channel,
|
||||
action="search",
|
||||
extra="movie",
|
||||
title="[COLOR blue]Cerca Film...[/COLOR]",
|
||||
thumbnail=CercaThumbnail,
|
||||
fanart=FilmFanart)]
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
itemlist = thumb(itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def menu(item):
|
||||
logger.info("[altadefinizionehd.py] menu")
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.info("[altadefinizionehd.py] DATA"+data)
|
||||
patron = r'<li id="menu.*?><a href="#">FILM PER ' + item.extra + r'<\/a><ul class="sub-menu">(.*?)<\/ul>'
|
||||
logger.info("[altadefinizionehd.py] BLOCK"+patron)
|
||||
block = scrapertools.find_single_match(data, patron)
|
||||
logger.info("[altadefinizionehd.py] BLOCK"+block)
|
||||
patron = r'<li id=[^>]+><a href="(.*?)">(.*?)<\/a><\/li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(block)
|
||||
for url, title in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='video',
|
||||
title=title,
|
||||
url=url))
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info("[altadefinizionehd.py] newest" + categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "peliculas":
|
||||
item.url = host
|
||||
item.action = "video"
|
||||
itemlist = video(item)
|
||||
|
||||
if itemlist[-1].action == "video":
|
||||
itemlist.pop()
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def video(item):
|
||||
logger.info("[altadefinizionehd.py] video")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.info("[altadefinizionehd.py] Data" +data)
|
||||
if 'archive-content' in data:
|
||||
regex = r'<div id="archive-content".*?>(.*?)<div class="pagination'
|
||||
else:
|
||||
regex = r'<div class="items".*?>(.*?)<div class="pagination'
|
||||
block = scrapertools.find_single_match(data, regex)
|
||||
logger.info("[altadefinizionehd.py] Block" +block)
|
||||
|
||||
patron = r'<article .*?class="item movies">.*?<img src="([^"]+)".*?<span class="quality">(.*?)<\/span>.*?<a href="([^"]+)">.*?<h4>([^<]+)<\/h4>(.*?)<\/article>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(block)
|
||||
|
||||
for scrapedthumb, scrapedquality, scrapedurl, scrapedtitle, scrapedinfo in matches:
|
||||
title = scrapedtitle + " [" + scrapedquality + "]"
|
||||
|
||||
patron = r'IMDb: (.*?)<\/span> <span>(.*?)<\/span>.*?"texto">(.*?)<\/div>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(scrapedinfo)
|
||||
logger.info("[altadefinizionehd.py] MATCHES" + str(matches))
|
||||
for rating, year, plot in matches:
|
||||
|
||||
infoLabels = {}
|
||||
infoLabels['Year'] = year
|
||||
infoLabels['Rating'] = rating
|
||||
infoLabels['Plot'] = plot
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="movie",
|
||||
title=title,
|
||||
fulltitle=scrapedtitle,
|
||||
infoLabels=infoLabels,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumb))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
patron = '<a class='+ "'arrow_pag'" + ' href="([^"]+)"'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="video",
|
||||
title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=next_page,
|
||||
thumbnail=thumb()))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[altadefinizionehd.py] init texto=[" + texto + "]")
|
||||
item.url = host + "/?s=" + texto
|
||||
return search_page(item)
|
||||
|
||||
def search_page(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
patron = r'<img src="([^"]+)".*?.*?<a href="([^"]+)">(.*?)<\/a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
title=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
patron = '<a class='+ "'arrow_pag'" + ' href="([^"]+)"'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="search_page",
|
||||
title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=next_page,
|
||||
thumbnail=thumb()))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = r"<li id='player-.*?'.*?class='dooplay_player_option'\sdata-type='(.*?)'\sdata-post='(.*?)'\sdata-nume='(.*?)'>.*?'title'>(.*?)</"
|
||||
matches = re.compile(patron, re.IGNORECASE).findall(data)
|
||||
|
||||
itemlist = []
|
||||
|
||||
for scrapedtype, scrapedpost, scrapednume, scrapedtitle in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="play",
|
||||
fulltitle=item.title + " [" + scrapedtitle + "]",
|
||||
show=scrapedtitle,
|
||||
title=item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]",
|
||||
url=host + "/wp-admin/admin-ajax.php",
|
||||
post=scrapedpost,
|
||||
server=scrapedtitle,
|
||||
nume=scrapednume,
|
||||
type=scrapedtype,
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
def play(item):
|
||||
import urllib
|
||||
payload = urllib.urlencode({'action': 'doo_player_ajax', 'post': item.post, 'nume': item.nume, 'type': item.type})
|
||||
data = httptools.downloadpage(item.url, post=payload).data
|
||||
|
||||
patron = r"<iframe.*src='(([^']+))'\s"
|
||||
matches = re.compile(patron, re.IGNORECASE).findall(data)
|
||||
|
||||
url = matches[0][0]
|
||||
url = url.strip()
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
NovitaThumbnail = "https://superrepo.org/static/images/icons/original/xplugin.video.moviereleases.png.pagespeed.ic.j4bhi0Vp3d.png"
|
||||
GenereThumbnail = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
|
||||
FilmFanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
|
||||
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
|
||||
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
|
||||
ListTxt = "[COLOR orange]Torna a video principale [/COLOR]"
|
||||
AvantiTxt = config.get_localized_string(30992)
|
||||
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
thumbnail = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "analdin",
|
||||
"name": "analdin",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.analdin.com/images/logo-retina.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'https://www.analdin.com/es'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/más-reciente/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/más-visto/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/mejor-valorado/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/?s=%s" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.find_single_match(data,'<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>')
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
|
||||
patron += 'src="([^"]+)".*?'
|
||||
patron += '<div class="videos">([^"]+)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return sorted(itemlist, key=lambda i: i.title)
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a class="popup-video-link" href="([^"]+)".*?'
|
||||
patron += 'thumb="([^"]+)".*?'
|
||||
patron += '<div class="duration">(.*?)</div>.*?'
|
||||
patron += '<strong class="title">\s*([^"]+)</strong>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtime,scrapedtitle in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fanart=thumbnail, contentTitle = title))
|
||||
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
|
||||
url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = 'video_url: \'([^\']+)\''
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl in matches:
|
||||
url = scrapedurl
|
||||
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
|
||||
return itemlist
|
||||
|
||||
21
channels/animealtadefinizione.json
Executable file
21
channels/animealtadefinizione.json
Executable file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"id": "animealtadefinizione",
|
||||
"name": "AnimealtAdefinizione",
|
||||
"active": false,
|
||||
"language": ["ita", "sub-ita"],
|
||||
"thumbnail": "animealtadefinizione.png",
|
||||
"banner": "animealtadefinizione.png",
|
||||
"categories": ["anime", "sub-ita"],
|
||||
"default_off": ["include_in_newest"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "perpage",
|
||||
"type": "list",
|
||||
"label": "Elementi per pagina",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["20","30","40","50","60","70","80","90","100"]
|
||||
}
|
||||
]
|
||||
}
|
||||
134
channels/animealtadefinizione.py
Executable file
134
channels/animealtadefinizione.py
Executable file
@@ -0,0 +1,134 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per animealtadefinizione
|
||||
# ----------------------------------------------------------
|
||||
|
||||
from core import support
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
perpage_list = ['20','30','40','50','60','70','80','90','100']
|
||||
perpage = perpage_list[support.config.get_setting('perpage' , 'animealtadefinizione')]
|
||||
epPatron = r'<td>\s*(?P<title>[^<]+)[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img[^>]+/Streaming'
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
anime=['/anime/',
|
||||
('Tipo',['', 'menu', 'Anime']),
|
||||
('Anno',['', 'menu', 'Anno']),
|
||||
('Genere', ['', 'menu','Genere']),
|
||||
('Ultimi Episodi',['', 'peliculas', 'last'])]
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def menu(item):
|
||||
action = 'peliculas'
|
||||
patronBlock= r'<a href="' + host + r'/category/' + item.args.lower() + r'/">' + item.args + r'</a>\s*<ul class="sub-menu">(?P<block>.*?)</ul>'
|
||||
patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
support.info(texto)
|
||||
item.search = texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
support.info(categoria)
|
||||
item = support.Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = host
|
||||
item.args = "last"
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
if '/movie/' in item.url:
|
||||
item.contentType = 'movie'
|
||||
action='findvideos'
|
||||
elif item.args == 'last':
|
||||
item.contentType = 'episode'
|
||||
action='episodios'
|
||||
else:
|
||||
item.contentType = 'tvshow'
|
||||
action='episodios'
|
||||
if item.search:
|
||||
query = 's'
|
||||
searchtext = item.search
|
||||
else:
|
||||
query='category_name'
|
||||
searchtext = item.url.split('/')[-2]
|
||||
if not item.pag: item.pag = 1
|
||||
# debug = True
|
||||
anime = True
|
||||
data = support.match(host + '/wp-admin/admin-ajax.php', post='action=itajax-sort&loop=main+loop&location=&thumbnail=1&rating=1sorter=recent&columns=4&numarticles='+perpage+'&paginated='+str(item.pag)+'¤tquery%5B'+query+'%5D='+searchtext).data.replace('\\','')
|
||||
patron = r'<a href="(?P<url>[^"]+)"><img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)" class="[^"]+" alt="" title="(?P<title>[^"]+?)\s+(?P<type>Movie)?\s*(?P<lang>Sub Ita|Ita)?\s*[sS]treaming'
|
||||
typeContentDict = {'movie':['movie']}
|
||||
typeActionDict = {'findvideos':['movie']}
|
||||
|
||||
def itemHook(item):
|
||||
item.url = support.re.sub('episodio-[0-9-]+', '', item.url)
|
||||
return item
|
||||
|
||||
def itemlistHook(itemlist):
|
||||
if item.search:
|
||||
itemlist = [ it for it in itemlist if ' Episodio ' not in it.title ]
|
||||
if len(itemlist) == int(perpage):
|
||||
item.pag += 1
|
||||
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), action='peliculas'))
|
||||
return itemlist
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
anime = True
|
||||
# debug = True
|
||||
pagination = int(perpage)
|
||||
patron = epPatron
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
itemlist = []
|
||||
if item.contentType == 'movie':
|
||||
matches = support.match(item, patron=epPatron).matches
|
||||
for title, url in matches:
|
||||
# support.dbg()
|
||||
get_video_list(item, url, title, itemlist)
|
||||
else:
|
||||
get_video_list(item, item.url, support.config.get_localized_string(30137), itemlist)
|
||||
return support.server(item, itemlist=itemlist)
|
||||
|
||||
|
||||
def get_video_list(item, url, title, itemlist):
|
||||
if 'vvvvid' in url:
|
||||
itemlist.append(item.clone(title='VVVVID', url=url, server='vvvvid', action='play'))
|
||||
else:
|
||||
from requests import get
|
||||
if not url.startswith('http'): url = host + url
|
||||
|
||||
url = support.match(get(url).url, string=True, patron=r'file=([^$]+)').match
|
||||
if 'http' not in url: url = 'http://' + url
|
||||
itemlist.append(item.clone(title=title, url=url, server='directo', action='play'))
|
||||
|
||||
return itemlist
|
||||
44
channels/animeforce.json
Normal file → Executable file
44
channels/animeforce.json
Normal file → Executable file
@@ -2,19 +2,18 @@
|
||||
"id": "animeforce",
|
||||
"name": "AnimeForce",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
|
||||
"banner": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
|
||||
"active": false,
|
||||
"thumbnail": "animeforce.png",
|
||||
"banner": "animeforce.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"label": "Includi in Ricerca Globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
@@ -25,12 +24,37 @@
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "autorenumber",
|
||||
"type": "bool",
|
||||
"label": "@70712",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "autorenumber_mode",
|
||||
"type": "bool",
|
||||
"label": "@70688",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
557
channels/animeforce.py
Normal file → Executable file
557
channels/animeforce.py
Normal file → Executable file
@@ -1,505 +1,162 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale per http://animeinstreaming.net/
|
||||
# Canale per AnimeForce
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from servers.decrypters import adfly
|
||||
|
||||
__channel__ = "animeforge"
|
||||
host = config.get_channel_url(__channel__)
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
from core import support
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
PERPAGE = 20
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
log("mainlist", "mainlist", item.channel)
|
||||
itemlist = [Item(channel=item.channel,
|
||||
action="lista_anime",
|
||||
title="[COLOR azure]Anime [/COLOR]- [COLOR lightsalmon]Lista Completa[/COLOR]",
|
||||
url=host + "/lista-anime/",
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(channel=item.channel,
|
||||
action="animeaggiornati",
|
||||
title="[COLOR azure]Anime Aggiornati[/COLOR]",
|
||||
url=host,
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(channel=item.channel,
|
||||
action="ultimiep",
|
||||
title="[COLOR azure]Ultimi Episodi[/COLOR]",
|
||||
url=host,
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(channel=item.channel,
|
||||
action="search",
|
||||
title="[COLOR yellow]Cerca ...[/COLOR]",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
|
||||
return itemlist
|
||||
anime = ['/anime',
|
||||
('In Corso',['/anime/anime-status/in-corso/', 'peliculas', 'status']),
|
||||
('Completi',['/anime/anime-status/completo/', 'peliculas', 'status']),
|
||||
('Genere',['/anime', 'submenu', 'genre']),
|
||||
('Anno',['/anime', 'submenu', 'anime-year']),
|
||||
('Tipologia',['/anime', 'submenu', 'anime-type']),
|
||||
('Stagione',['/anime', 'submenu', 'anime-season']),
|
||||
('Ultime Serie',['/category/anime/articoli-principali/','peliculas','last'])
|
||||
]
|
||||
return locals()
|
||||
|
||||
|
||||
# =================================================================
|
||||
@support.scrape
|
||||
def submenu(item):
|
||||
action = 'peliculas'
|
||||
patronBlock = r'data-taxonomy="' + item.args + r'"(?P<block>.*?)</select'
|
||||
patronMenu = r'<option class="level-\d+ (?P<url>[^"]+)"[^>]+>(?P<t>[^(]+)[^\(]+\((?P<num>\d+)'
|
||||
def itemHook(item):
|
||||
if not item.url.startswith('http'):
|
||||
item.url = host + '/anime/' + item.args + '/' + item.url
|
||||
item.title = support.typo(item.t, 'bold')
|
||||
return item
|
||||
return locals()
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def newest(categoria):
|
||||
log("newest", "newest" + categoria)
|
||||
support.info(categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
item = support.Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.contentType = 'tvshow'
|
||||
item.url = host
|
||||
item.action = "ultimiep"
|
||||
itemlist = ultimiep(item)
|
||||
|
||||
if itemlist[-1].action == "ultimiep":
|
||||
itemlist.pop()
|
||||
# Continua la ricerca in caso di errore
|
||||
item.args = 'newest'
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
support.logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def search(item, texto):
|
||||
log("search", "search", item.channel)
|
||||
item.url = host + "/?s=" + texto
|
||||
def search(item, text):
|
||||
support.info('search',text)
|
||||
item.search = text
|
||||
item.url = host + '/lista-anime/'
|
||||
item.contentType = 'tvshow'
|
||||
try:
|
||||
return search_anime(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
support.logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
# =================================================================
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
search = item.search
|
||||
anime = True
|
||||
action = 'check'
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def search_anime(item):
|
||||
log("search_anime", "search_anime", item.channel)
|
||||
itemlist = []
|
||||
patron = r'<a href="(?P<url>[^"]+)"[^>]+>\s*<img src="(?P<thumb>[^"]+)" alt="(?P<title>.*?)(?: Sub| sub| SUB|")'
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
if search:
|
||||
patron = r'<a href="(?P<url>[^"]+)"\s*title="(?P<title>.*?)(?: Sub| sub| SUB|")'
|
||||
|
||||
patron = r'<a href="([^"]+)"><img.*?src="([^"]+)".*?title="([^"]+)".*?/>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
if item.args == 'newest': item.action = 'findvideos'
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
if "Sub Ita Download & Streaming" in scrapedtitle or "Sub Ita Streaming":
|
||||
if 'episodio' in scrapedtitle.lower():
|
||||
itemlist.append(episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail))
|
||||
else:
|
||||
scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
|
||||
cleantitle, eptype = clean_title(scrapedtitle)
|
||||
patronNext = '<li class="page-item disabled">(?:[^>]+>){4}<a class="page-link" href="([^"]+)'
|
||||
|
||||
scrapedurl, total_eps = create_url(scrapedurl, cleantitle)
|
||||
def itemHook(item):
|
||||
if 'sub-ita' in item.url:
|
||||
if item.args != 'newest': item.title = item.title + support.typo('Sub-ITA','_ [] color kod')
|
||||
item.contentLanguage = 'Sub-ITA'
|
||||
return item
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodios",
|
||||
text_color="azure",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
fulltitle=cleantitle,
|
||||
show=cleantitle,
|
||||
thumbnail=scrapedthumbnail))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Next Page
|
||||
next_page = scrapertools.find_single_match(data, r'<link rel="next" href="([^"]+)"[^/]+/>')
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="search_anime",
|
||||
text_bold=True,
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=next_page,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
|
||||
|
||||
return itemlist
|
||||
return locals()
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def animeaggiornati(item):
|
||||
log("animeaggiornati", "animeaggiornati", item.channel)
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
if 'Streaming' in scrapedtitle:
|
||||
cleantitle, eptype = clean_title(scrapedtitle)
|
||||
|
||||
# Creazione URL
|
||||
scrapedurl, total_eps = create_url(scrapedurl, scrapedtitle)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodios",
|
||||
text_color="azure",
|
||||
contentType="tvshow",
|
||||
title=cleantitle,
|
||||
url=scrapedurl,
|
||||
fulltitle=cleantitle,
|
||||
show=cleantitle,
|
||||
thumbnail=scrapedthumbnail))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
return itemlist
|
||||
def check(item):
|
||||
m = support.match(item, headers=headers, patron=r'Tipologia[^>]+>\s*<a href="([^"]+)"')
|
||||
item.data = m.data
|
||||
if 'movie' in m.match:
|
||||
item.contentType = 'movie'
|
||||
return findvideos(item)
|
||||
else:
|
||||
return episodios(item)
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def ultimiep(item):
|
||||
log("ultimiep", "ultimiep", item.channel)
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
if 'Streaming' in scrapedtitle:
|
||||
itemlist.append(episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def lista_anime(item):
|
||||
log("lista_anime", "lista_anime", item.channel)
|
||||
|
||||
itemlist = []
|
||||
|
||||
p = 1
|
||||
if '{}' in item.url:
|
||||
item.url, p = item.url.split('{}')
|
||||
p = int(p)
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = r'<li>\s*<strong>\s*<a\s*href="([^"]+?)">([^<]+?)</a>\s*</strong>\s*</li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
|
||||
if (p - 1) * PERPAGE > i: continue
|
||||
if i >= p * PERPAGE: break
|
||||
|
||||
# Pulizia titolo
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
|
||||
cleantitle, eptype = clean_title(scrapedtitle, simpleClean=True)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action="episodios",
|
||||
text_color="azure",
|
||||
contentType="tvshow",
|
||||
title=cleantitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
fulltitle=cleantitle,
|
||||
show=cleantitle,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
if len(matches) >= p * PERPAGE:
|
||||
scrapedurl = item.url + '{}' + str(p + 1)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action="lista_anime",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
itemlist = []
|
||||
anime = True
|
||||
pagination = 50
|
||||
data = item.data
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
vvvvid_videos = False
|
||||
for scrapedtitle, scrapedurl, scrapedimg in matches:
|
||||
if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
|
||||
continue
|
||||
if 'vvvvid' in scrapedurl.lower():
|
||||
if not vvvvid_videos: vvvvid_videos = True
|
||||
itemlist.append(Item(title='I Video VVVVID Non sono supportati', text_color="red"))
|
||||
continue
|
||||
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
|
||||
scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
title=scrapedtitle,
|
||||
url=urlparse.urljoin(host, scrapedurl),
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
plot=item.plot,
|
||||
fanart=item.fanart,
|
||||
thumbnail=item.thumbnail))
|
||||
|
||||
# Comandi di servizio
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0 and not vvvvid_videos:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title=config.get_localized_string(30161),
|
||||
text_color="yellow",
|
||||
text_bold=True,
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist
|
||||
if '<h6>Streaming</h6>' in data:
|
||||
patron = r'<td style[^>]+>\s*.*?(?:<span[^>]+)?<strong>(?P<title>[^<]+)<\/strong>.*?<td style[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?P<episode>\d+)'
|
||||
else:
|
||||
patron = r'<a\s*href="(?P<url>[^"]+)"\s*title="(?P<title>[^"]+)"\s*class="btn btn-dark mb-1">(?P<episode>\d+)'
|
||||
def itemHook(item):
|
||||
support.info(item)
|
||||
if item.url.startswith('//'): item.url= 'https:' + item.url
|
||||
elif item.url.startswith('/'): item.url= 'https:/' + item.url
|
||||
return item
|
||||
action = 'findvideos'
|
||||
return locals()
|
||||
|
||||
|
||||
# ==================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def findvideos(item):
|
||||
logger.info("kod.animeforce findvideos")
|
||||
|
||||
support.info(item)
|
||||
itemlist = []
|
||||
|
||||
if item.extra:
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
blocco = scrapertools.find_single_match(data, r'%s(.*?)</tr>' % item.extra)
|
||||
url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]*>')
|
||||
if 'vvvvid' in url.lower():
|
||||
itemlist = [Item(title='I Video VVVVID Non sono supportati', text_color="red")]
|
||||
return itemlist
|
||||
if 'http' not in url: url = "".join(['https:', url])
|
||||
if item.data:
|
||||
url = support.match(item.data, patron=r'<a\s*href="([^"]+)"\s*title="[^"]+"\s*class="btn btn-dark mb-1">').match
|
||||
else:
|
||||
url = item.url
|
||||
|
||||
if 'adf.ly' in url:
|
||||
url = adfly.get_long_url(url)
|
||||
elif 'bit.ly' in url:
|
||||
url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")
|
||||
# if 'adf.ly' in item.url:
|
||||
# from servers.decrypters import adfly
|
||||
# url = adfly.get_long_url(item.url)
|
||||
|
||||
if 'animeforce' in url:
|
||||
headers.append(['Referer', item.url])
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
itemlist.extend(servertools.find_video_items(data=data))
|
||||
# elif 'bit.ly' in item.url:
|
||||
# url = support.httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location")
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.title + videoitem.title
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.show = item.show
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
url = url.split('&')[0]
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
headers.append(['Referer', url])
|
||||
for video in matches:
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title,
|
||||
url=video + '|' + urllib.urlencode(dict(headers)), folder=False))
|
||||
else:
|
||||
itemlist.extend(servertools.find_video_items(data=url))
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.title + videoitem.title
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.show = item.show
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
return itemlist
|
||||
# else:
|
||||
# url = host
|
||||
# for u in item.url.split('/'):
|
||||
# if u and 'animeforce' not in u and 'http' not in u:
|
||||
# url += '/' + u
|
||||
|
||||
|
||||
# ==================================================================
|
||||
# if 'php?' in url:
|
||||
# url = support.httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")
|
||||
# url = support.match(url, patron=r'class="button"><a href=(?:")?([^" ]+)', headers=headers).match
|
||||
# else:
|
||||
# if item.data: url = item.data
|
||||
# if item.contentType == 'movie': url = support.match()
|
||||
# url = support.match(url, patron=r'data-href="([^"]+)" target').match
|
||||
# if not url: url = support.match(url, patron=[r'<source src=(?:")?([^" ]+)',r'name="_wp_http_referer" value="([^"]+)"']).match
|
||||
# if url.startswith('//'): url = 'https:' + url
|
||||
# elif url.startswith('/'): url = 'https:/' + url
|
||||
url = support.match(url, patron=r'data-href="([^"]+)" target').match
|
||||
if 'vvvvid' in url: itemlist.append(item.clone(action="play", title='VVVVID', url=url, server='vvvvid'))
|
||||
else: itemlist.append(item.clone(action="play", title=support.config.get_localized_string(30137), url=url, server='directo'))
|
||||
|
||||
# =================================================================
|
||||
# Funzioni di servizio
|
||||
# -----------------------------------------------------------------
|
||||
def scrapedAll(url="", patron=""):
|
||||
data = httptools.downloadpage(url).data
|
||||
MyPatron = patron
|
||||
matches = re.compile(MyPatron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def create_url(url, title, eptype=""):
|
||||
logger.info()
|
||||
|
||||
if 'download' not in url:
|
||||
url = url.replace('-streaming', '-download-streaming')
|
||||
|
||||
total_eps = ""
|
||||
if not eptype:
|
||||
url = re.sub(r'episodio?-?\d+-?(?:\d+-|)[oav]*', '', url)
|
||||
else: # Solo se è un episodio passa
|
||||
total_eps = scrapertools.find_single_match(title.lower(), r'\((\d+)-(?:episodio|sub-ita)\)') # Questo numero verrà rimosso dall'url
|
||||
if total_eps: url = url.replace('%s-' % total_eps, '')
|
||||
url = re.sub(r'%s-?\d*-' % eptype.lower(), '', url)
|
||||
url = url.replace('-fine', '')
|
||||
|
||||
return url, total_eps
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def clean_title(title, simpleClean=False):
|
||||
logger.info()
|
||||
|
||||
title = title.replace("Streaming", "").replace("&", "")
|
||||
title = title.replace("Download", "")
|
||||
title = title.replace("Sub Ita", "")
|
||||
cleantitle = title.replace("#038;", "").replace("amp;", "").strip()
|
||||
|
||||
if '(Fine)' in title:
|
||||
cleantitle = cleantitle.replace('(Fine)', '').strip() + " (Fine)"
|
||||
eptype = ""
|
||||
if not simpleClean:
|
||||
if "episodio" in title.lower():
|
||||
eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
|
||||
cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
|
||||
|
||||
if 'episodio' not in eptype.lower():
|
||||
cleantitle = re.sub(r'Episodio?\s*\d+\s*(?:\(\d+\)|)\s*[\(OAV\)]*', '', cleantitle).strip()
|
||||
|
||||
if '(Fine)' in title:
|
||||
cleantitle = cleantitle.replace('(Fine)', '')
|
||||
|
||||
return cleantitle, eptype
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail):
|
||||
scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
|
||||
cleantitle, eptype = clean_title(scrapedtitle)
|
||||
|
||||
# Creazione URL
|
||||
scrapedurl, total_eps = create_url(scrapedurl, scrapedtitle, eptype)
|
||||
|
||||
epnumber = ""
|
||||
if 'episodio' in eptype.lower():
|
||||
epnumber = scrapertools.find_single_match(scrapedtitle.lower(), r'episodio?\s*(\d+)')
|
||||
eptype += ":? %s%s" % (epnumber, (r" \(%s\):?" % total_eps) if total_eps else "")
|
||||
|
||||
extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
|
||||
item = Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
fulltitle=cleantitle,
|
||||
extra=extra,
|
||||
show=cleantitle,
|
||||
thumbnail=scrapedthumbnail)
|
||||
return item
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def scrapedSingle(url="", single="", patron=""):
|
||||
data = httptools.downloadpage(url).data
|
||||
paginazione = scrapertools.find_single_match(data, single)
|
||||
matches = re.compile(patron, re.DOTALL).findall(paginazione)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def Crea_Url(pagina="1", azione="ricerca", categoria="", nome=""):
|
||||
# esempio
|
||||
# chiamate.php?azione=ricerca&cat=&nome=&pag=
|
||||
Stringa = host + "/chiamate.php?azione=" + azione + "&cat=" + categoria + "&nome=" + nome + "&pag=" + pagina
|
||||
log("crea_Url", Stringa)
|
||||
return Stringa
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def log(funzione="", stringa="", canale=""):
|
||||
logger.debug("[" + canale + "].[" + funzione + "] " + stringa)
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# =================================================================
|
||||
# riferimenti di servizio
|
||||
# -----------------------------------------------------------------
|
||||
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
|
||||
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
|
||||
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
|
||||
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
|
||||
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
|
||||
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
|
||||
AvantiTxt = config.get_localized_string(30992)
|
||||
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
return support.server(item, itemlist=itemlist)
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
{
|
||||
"id": "animeleggendari",
|
||||
"name": "AnimePerTutti",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "animepertutti.png",
|
||||
"bannermenu": "animepertutti.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare", "IT"]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,185 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per animeleggendari
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from core import servertools, httptools, scrapertoolsV2, tmdb, support
|
||||
from core.item import Item
|
||||
from core.support import log, menu
|
||||
from lib.js2py.host import jsfunctions
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay, autorenumber
|
||||
|
||||
__channel__ = "animeleggendari"
|
||||
host = config.get_channel_url(__channel__)
|
||||
|
||||
# Richiesto per Autoplay
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['verystream', 'openload', 'streamango']
|
||||
list_quality = ['default']
|
||||
|
||||
checklinks = config.get_setting('checklinks', 'animeleggendari')
|
||||
checklinks_number = config.get_setting('checklinks_number', 'animeleggendari')
|
||||
|
||||
def mainlist(item):
|
||||
log()
|
||||
|
||||
itemlist = []
|
||||
menu(itemlist, 'Anime Leggendari', 'peliculas', host + '/category/anime-leggendari/')
|
||||
menu(itemlist, 'Anime ITA', 'peliculas', host + '/category/anime-ita/')
|
||||
menu(itemlist, 'Anime SUB-ITA', 'peliculas', host + '/category/anime-sub-ita/')
|
||||
menu(itemlist, 'Anime Conclusi', 'peliculas', host + '/category/serie-anime-concluse/')
|
||||
menu(itemlist, 'Anime in Corso', 'peliculas', host + '/category/anime-in-corso/')
|
||||
menu(itemlist, 'Genere', 'genres', host)
|
||||
menu(itemlist, 'Cerca...', 'search')
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
def search(item, texto):
|
||||
log(texto)
|
||||
|
||||
item.url = host + "/?s=" + texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
def last_ep(item):
|
||||
log('ANIME PER TUTTI')
|
||||
return support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url','title'],patron_block='<ul class="mh-tab-content-posts">(.*?)<\/ul>', action='findvideos')
|
||||
|
||||
def newest(categoria):
|
||||
log('ANIME PER TUTTI')
|
||||
log(categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = host
|
||||
item.action = "last_ep"
|
||||
itemlist = last_ep(item)
|
||||
|
||||
if itemlist[-1].action == "last_ep":
|
||||
itemlist.pop()
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
def genres(item):
|
||||
itemlist = support.scrape(item, '<a href="([^"]+)">([^<]+)<', ['url', 'title'], action='peliculas', patron_block=r'Generi.*?<ul.*?>(.*?)<\/ul>', blacklist=['Contattaci','Privacy Policy', 'DMCA'])
|
||||
return support.thumb(itemlist)
|
||||
|
||||
def peliculas(item):
|
||||
log()
|
||||
itemlist = []
|
||||
|
||||
blacklist = ['top 10 anime da vedere']
|
||||
matches, data = support.match(item, r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+')
|
||||
|
||||
for url, title, thumb in matches:
|
||||
title = scrapertoolsV2.decodeHtmlentities(title.strip()).replace("streaming", "")
|
||||
lang = scrapertoolsV2.find_single_match(title, r"((?:SUB ITA|ITA))")
|
||||
videoType = ''
|
||||
if 'movie' in title.lower():
|
||||
videoType = ' - (MOVIE)'
|
||||
if 'ova' in title.lower():
|
||||
videoType = ' - (OAV)'
|
||||
|
||||
cleantitle = title.replace(lang, "").replace('(Streaming & Download)', '').replace('( Streaming & Download )', '').replace('OAV', '').replace('OVA', '').replace('MOVIE', '').strip()
|
||||
|
||||
if not videoType :
|
||||
contentType="tvshow"
|
||||
action="episodios"
|
||||
else:
|
||||
contentType="movie"
|
||||
action="findvideos"
|
||||
|
||||
if not title.lower() in blacklist:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action=action,
|
||||
contentType=contentType,
|
||||
title=support.typo(cleantitle + videoType, 'bold') + support.typo(lang,'_ [] color kod'),
|
||||
fulltitle=cleantitle,
|
||||
show=cleantitle,
|
||||
url=url,
|
||||
thumbnail=thumb))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
autorenumber.renumber(itemlist)
|
||||
support.nextPage(itemlist, item, data, r'<a class="next page-numbers" href="([^"]+)">')
|
||||
|
||||
return itemlist
|
||||
|
||||
def episodios(item):
|
||||
log()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
block = scrapertoolsV2.find_single_match(data, r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='findvideos',
|
||||
contentType='episode',
|
||||
title=support.typo('Episodio 1 bold'),
|
||||
fulltitle=item.title,
|
||||
url=item.url,
|
||||
thumbnail=item.thumbnail))
|
||||
|
||||
if block:
|
||||
matches = re.compile(r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>', re.DOTALL).findall(data)
|
||||
for url, number in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='findvideos',
|
||||
contentType='episode',
|
||||
title=support.typo('Episodio ' + number,'bold'),
|
||||
fulltitle=item.title,
|
||||
url=url,
|
||||
thumbnail=item.thumbnail))
|
||||
|
||||
autorenumber.renumber(itemlist, item)
|
||||
support.videolibrary
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
log()
|
||||
data = ''
|
||||
matches = support.match(item, 'str="([^"]+)"')[0]
|
||||
if matches:
|
||||
for match in matches:
|
||||
data += str(jsfunctions.unescape(re.sub('@|g','%', match)))
|
||||
data += str(match)
|
||||
log('DATA',data)
|
||||
if 'animepertutti' in data:
|
||||
log('ANIMEPERTUTTI!')
|
||||
|
||||
else:
|
||||
data = ''
|
||||
|
||||
itemlist = support.server(item,data)
|
||||
|
||||
if checklinks:
|
||||
itemlist = servertools.check_list_links(itemlist, checklinks_number)
|
||||
|
||||
# itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
62
channels/animesaturn.json
Normal file → Executable file
62
channels/animesaturn.json
Normal file → Executable file
@@ -2,69 +2,9 @@
|
||||
"id": "animesaturn",
|
||||
"name": "AnimeSaturn",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "animesaturn.png",
|
||||
"banner": "animesaturn.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "channel_host",
|
||||
"type": "text",
|
||||
"label": "Host del canale",
|
||||
"default": "https://www.animesaturn.com",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
"settings": []
|
||||
}
|
||||
|
||||
515
channels/animesaturn.py
Normal file → Executable file
515
channels/animesaturn.py
Normal file → Executable file
@@ -1,379 +1,194 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per AnimeSaturn
|
||||
# Thanks to 4l3x87
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
|
||||
import urlparse
|
||||
from core import support
|
||||
|
||||
import channelselector
|
||||
from core import httptools, tmdb, support, scrapertools, jsontools
|
||||
from core.item import Item
|
||||
from core.support import log
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay, autorenumber
|
||||
|
||||
__channel__ = "animesaturn"
|
||||
host = config.get_setting("channel_host", __channel__)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'fembed', 'animeworld']
|
||||
list_quality = ['default', '480p', '720p', '1080p']
|
||||
host = support.config.get_channel_url()
|
||||
__channel__ = 'animesaturn'
|
||||
cookie = support.config.get_setting('cookie', __channel__)
|
||||
headers = {'X-Requested-With': 'XMLHttpRequest', 'Cookie': cookie}
|
||||
|
||||
|
||||
def get_cookie(data):
|
||||
global cookie, headers
|
||||
cookie = support.match(data, patron=r'document.cookie="([^\s]+)').match
|
||||
support.config.set_setting('cookie', cookie, __channel__)
|
||||
headers = [['Cookie', cookie]]
|
||||
|
||||
|
||||
def get_data(item):
|
||||
# support.dbg()
|
||||
# url = support.match(item.url, headers=headers, follow_redirects=True, only_headers=True).url
|
||||
data = support.match(item.url, headers=headers, follow_redirects=True).data
|
||||
if 'ASCookie' in data:
|
||||
get_cookie(data)
|
||||
data = get_data(item)
|
||||
return data
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
log()
|
||||
itemlist = []
|
||||
support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow')
|
||||
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
|
||||
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico'])
|
||||
support.menu(itemlist, 'Cerca', 'search', host)
|
||||
support.aplay(item, itemlist, list_servers, list_quality)
|
||||
support.channel_config(item, itemlist)
|
||||
|
||||
return itemlist
|
||||
anime = ['/animelist?load_all=1&d=1',
|
||||
('ITA',['', 'submenu', '/filter?language%5B0%5D=1']),
|
||||
('SUB-ITA',['', 'submenu', '/filter?language%5B0%5D=0']),
|
||||
('Più Votati',['/toplist','menu', 'top']),
|
||||
('In Corso',['/animeincorso','peliculas','incorso']),
|
||||
('Ultimi Episodi',['/fetch_pages.php?request=episodes&d=1','peliculas','updated'])]
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def cleantitle(scrapedtitle):
    """Normalize a scraped title.

    Decodes HTML entities, drops the '[HD]' tag, normalizes quote/cross
    characters and removes a trailing '(YYYY)' year marker, if present.
    """
    title = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    for old, new in (('[HD]', ''), ('’', "'"), ('×', 'x'), ('"', "'")):
        title = title.replace(old, new)

    year = scrapertools.find_single_match(title, '\((\d{4})\)')
    if year:
        title = title.replace('(' + year + ')', '')

    return title.strip()
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def lista_anime(item):
    """List the series catalogue with client-side pagination (PERPAGE items).

    ``item.url`` may carry a '{}N' suffix with the current page number, and
    may itself be a blank-line-separated list of 'url||title' pairs when
    arriving from list_az() instead of a page URL.
    """
    log()
    itemlist = []

    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        # page marker appended by the pagination entry below
        item.url, p = item.url.split('{}')
        p = int(p)

    if '||' in item.url:
        # 'url||title' entries pre-built by list_az(); no download needed
        series = item.url.split('\n\n')
        matches = []
        for i, serie in enumerate(series):
            matches.append(serie.split('||'))
    else:
        # Estrae i contenuti (scrape the full catalogue page)
        patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
        matches = support.match(item, patron, headers=headers)[0]

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # client-side pagination window [ (p-1)*PERPAGE, p*PERPAGE )
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
        movie = False
        showtitle = title
        if '(ITA)' in title:
            title = title.replace('(ITA)', '').strip()
            showtitle = title
        else:
            title += ' ' + support.typo('Sub-ITA', '_ [] color kod')

        infoLabels = {}
        # hard-coded overrides: titles the site lists as series but are movies
        if 'Akira' in title:
            movie = True
            infoLabels['year'] = 1988

        if 'Dragon Ball Super Movie' in title:
            movie = True
            infoLabels['year'] = 2019

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios" if movie == False else 'findvideos',
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=showtitle,
                 show=showtitle,
                 contentTitle=showtitle,
                 plot=scrapedplot,
                 contentType='episode' if movie == False else 'movie',
                 originalUrl=scrapedurl,
                 infoLabels=infoLabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist)

    # Paginazione: offer a next page while a full window was filled
    if len(matches) >= p * PERPAGE:
        support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def episodios(item):
    """Fetch the episode list through the site's XHR 'loading_anime' endpoint.

    Falls through to findvideos() when the show page declares the title a
    Movie (or the only listed episode is a Movie entry).
    """
    log()
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
    # movie or series
    # NOTE(review): the leading \E in this pattern is not a regex class — in
    # Python's re it matches a literal 'E'; presumably an unintended backslash.
    movie = scrapertools.find_single_match(data, r'\Episodi:</b>\s(\d*)\sMovie')

    # second request: the actual episode table, served only to XHR callers
    data = httptools.downloadpage(
        host + "/loading_anime?anime_id=" + anime_id,
        headers={
            'X-Requested-With': 'XMLHttpRequest'
        }).data

    patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
    patron += r'<td style="[^"]+"><a href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)  # strip stray tags
        scrapedtitle = '[B]' + scrapedtitle + '[/B]'

        itemlist.append(
            Item(
                channel=item.channel,
                action="findvideos",
                contentType="episode",
                title=scrapedtitle,
                url=urlparse.urljoin(host, scrapedurl),
                fulltitle=scrapedtitle,
                show=scrapedtitle,
                plot=item.plot,
                fanart=item.thumbnail,
                thumbnail=item.thumbnail))

    # a single 'Movie' entry (or a Movie flag on the page) is played directly
    if ((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType != 'movie':
        item.url = itemlist[0].url
        item.contentType = 'movie'
        return findvideos(item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    autorenumber.renumber(itemlist, item)
    support.videolibrary(itemlist, item, 'bold color kod')

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
    """Resolve the 'download e streaming' hub page for *item* and build server links.

    For movies the episode list is fetched first and the first episode's page
    becomes the source.  The episode page links to a hub page whose HTML is
    handed to support.server() for server detection.

    Fixes: drops the unused ``originalItem`` alias and the dead
    ``itemlist = []`` initialization (the list was unconditionally rebound by
    support.server() below).
    """
    log()

    if item.contentType == 'movie':
        episodes = episodios(item)
        if len(episodes) > 0:
            item.url = episodes[0].url

    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    data = re.sub(r'\n|\t|\s+', ' ', data)
    # button linking to the page that aggregates the streaming/download mirrors
    patron = r'<a href="([^"]+)"><div class="downloadestreaming">'
    url = scrapertools.find_single_match(data, patron)
    data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data
    data = re.sub(r'\n|\t|\s+', ' ', data)
    return support.server(item, data=data)
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
def ultimiep(item):
    """List the latest released episodes (server-paginated XHR endpoint)."""
    log()
    itemlist = []

    p = 1
    if '{}' in item.url:
        # page marker appended by the pagination entry below
        item.url, p = item.url.split('{}')
        p = int(p)

    # pages after the first are requested via POST page=N
    post = "page=%s" % p if p > 1 else None

    data = httptools.downloadpage(
        item.url, post=post, headers={
            'X-Requested-With': 'XMLHttpRequest'
        }).data

    # thumb / url / series title ("testo") / episode label ("testo2")
    patron = r"""<a href='[^']+'><div class="locandina"><img alt="[^"]+" src="([^"]+)" title="[^"]+" class="grandezza"></div></a>\s*"""
    patron += r"""<a href='([^']+)'><div class="testo">(.+?)</div></a>\s*"""
    patron += r"""<a href='[^']+'><div class="testo2">(.+?)</div></a>"""
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle1, scrapedtitle2 in matches:
        scrapedtitle1 = cleantitle(scrapedtitle1)
        scrapedtitle2 = cleantitle(scrapedtitle2)
        scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''

        title = scrapedtitle
        showtitle = scrapedtitle
        if '(ITA)' in title:
            title = title.replace('(ITA)', '').strip()
            showtitle = title
        else:
            title += ' ' + support.typo('Sub-ITA', '_ [] color kod')

        itemlist.append(
            Item(channel=item.channel,
                 contentType="episode",
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 fulltitle=scrapedtitle1,
                 show=showtitle,
                 thumbnail=scrapedthumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagine: the endpoint advertises the next page number in a data attribute
    patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
    next_page = scrapertools.find_single_match(data, patronvideos)
    if next_page:
        support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def newest(categoria):
    """Hook called by the global 'news' section for this channel.

    NOTE(review): this body looks like a bad merge — a stray nested
    ``search`` definition (never called here, shadows the module-level one)
    sits in the middle, ``peliculas(item)`` is returned from the legacy path,
    the error is logged twice per line, and the final ``return itemlist`` is
    unreachable.  Confirm against branch history before cleaning up.
    """
    log(categoria)
    itemlist = []
    item = Item()
    item.url = host
    item.extra = ''

    # merge artifact: unused inner search() shadowing the module-level one
    def search(item, texto):
        support.info(texto)
        item.url = host + '/animelist?search=' + texto
        item.contentType = 'tvshow'
    try:
        if categoria == "anime":
            item.url = "%s/fetch_pages?request=episodios" % host
            item.action = "ultimiep"
            itemlist = ultimiep(item)

            if itemlist[-1].action == "ultimiep":
                itemlist.pop()  # drop the trailing 'next page' entry

        return peliculas(item)
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
            support.logger.error("%s" % line)  # duplicate of the line above
        return []

    # unreachable: both the try body and the except handler return above
    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search_anime(item, texto):
    """Search titles via the site's JSON typeahead endpoints.

    NOTE(review): the stray inner ``newest`` definition below is never
    called and looks like a merge artifact — confirm before removing.
    """
    log(texto)
    def newest(categoria):
        support.info()
    itemlist = []

    # first endpoint returns a JSON list of matching titles
    data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
    jsondata = jsontools.load(data)

    for title in jsondata:
        # second endpoint resolves each typeahead title to its URL slug
        data = str(httptools.downloadpage("%s/templates/header?check=1" % host, post="typeahead=%s" % title).data)

        if 'Anime non esistente' in data:
            continue
        else:
            title = title.replace('(ita)', '(ITA)')
            showtitle = title
            if '(ITA)' in title:
                title = title.replace('(ITA)', '').strip()
                showtitle = title
            else:
                title += ' ' + support.typo('Sub-ITA', '_ [] color kod')

            url = "%s/anime/%s" % (host, data)

            itemlist.append(
                Item(
                    channel=item.channel,
                    contentType="episode",
                    action="episodios",
                    title=title,
                    url=url,
                    fulltitle=title,
                    show=showtitle,
                    thumbnail=""))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search(item, texto):
    """Channel search entry point; delegates to search_anime().

    NOTE(review): everything after the first ``return`` is unreachable and
    references an undefined ``categoria`` — leftover from an older version
    of this function.  The except handler also logs each line twice.
    """
    log(texto)
    itemlist = []

    item = support.Item()
    try:
        return search_anime(item, texto)

        # --- unreachable legacy path below -----------------------------
        if categoria == "anime":
            item.url = host + '/fetch_pages.php?request=episodes&d=1'
            item.args = "updated"
            return peliculas(item)
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
            support.logger.error("{0}".format(line))  # duplicate logging
        return []
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
def list_az(item):
    """Group the full series list by initial letter for the A-Z archive.

    Each letter entry carries its series as 'url||title' pairs joined with
    blank lines; lista_anime() splits that payload back apart.
    """
    log()

    # Articoli
    patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
    matches = support.match(item, patron, headers=headers)[0]

    buckets = {}
    for scrapedurl, scrapedtitle in matches:
        initial = scrapedtitle[0].upper()
        buckets.setdefault(initial, []).append(scrapedurl + '||' + scrapedtitle)

    return [Item(channel=item.channel,
                 action="lista_anime",
                 url='\n\n'.join(buckets[initial]),
                 title=initial,
                 fulltitle=initial)
            for initial in sorted(buckets)]
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
@support.scrape
def submenu(item):
    """Scrape-style submenu: one entry per filter <select> on the page.

    The @support.scrape decorator consumes the returned locals()
    (data / action / patronMenu / itemlistHook) to build the itemlist.
    """
    data = support.match(item.url + item.args).data
    action = 'filter'
    # one menu entry per <h5> + <select>: title, select id, inner options
    patronMenu = r'<h5 class="[^"]+">(?P<title>[^<]+)[^>]+>[^>]+>\s*<select id="(?P<parameter>[^"]+)"[^>]+>(?P<data>.*?)</select>'
    def itemlistHook(itemlist):
        # prepend an unfiltered 'Tutti' entry and drop the last scraped one
        itemlist.insert(0, item.clone(title=support.typo('Tutti','bold'), url=item.url + item.args, action='peliculas'))
        return itemlist[:-1]
    return locals()
|
||||
|
||||
|
||||
def filter(item):
    """Expand one filter <select> into a menu entry per <option>."""
    source = item.data if item.data else item.url
    options = support.match(source, patron=r'<option value="(?P<value>[^"]+)"[^>]*>(?P<title>[^<]+)').matches

    itemlist = [item.clone(title=support.typo(title, 'bold'),
                           url='{}{}&{}%5B0%5D={}'.format(host, item.args, item.parameter, value),
                           action='peliculas',
                           args='filter')
                for value, title in options]

    support.thumb(itemlist, genre=True)
    return itemlist
|
||||
|
||||
|
||||
@support.scrape
def menu(item):
    """Scrape-style 'Più Votati' menu; sections are parsed from the top lists page.

    The decorator builds items from the returned locals(); each scraped
    section keeps the original page URL (see itemHook) so peliculas() can
    re-parse the fragment from item.other.
    """
    # section header + everything up to the next list marker becomes 'other'
    patronMenu = r'<div class="col-md-13 bg-dark-as-box-shadow p-2 text-white text-center">(?P<title>[^"<]+)<(?P<other>.*?)(?:"lista-top"|"clearfix")'
    action = 'peliculas'
    item.args = 'top'
    def itemHook(item2):
        # every section points back at the same page; content rides in 'other'
        item2.url = item.url
        return item2

    return locals()
|
||||
|
||||
|
||||
@support.scrape
def peliculas(item):
    """Scrape-style listing; the decorator builds items from these locals().

    ``item.args`` selects the flavour: 'top' (parsed from the pre-fetched
    fragment in item.other), 'updated' (latest episodes, POST-paginated),
    'filter' (query-string pagination), 'incorso', or the plain catalogue.
    """
    anime = True

    deflang= 'Sub-ITA'
    action = 'check'
    page = None
    # POST body only for pages after the first
    post = "page=" + str(item.page if item.page else 1) if item.page and int(item.page) > 1 else None
    data = get_data(item)

    # debug = True

    if item.args == 'top':
        # the 'Più Votati' menu already carries the HTML fragment
        data = item.other
        patron = r'light">(?P<title2>[^<]+)</div>\s*(?P<title>[^<]+)[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)">(?:<a[^>]+>|\s*)<img.*?src="(?P<thumb>[^"]+)"'
    else:
        data = support.match(item, post=post, headers=headers).data
        if item.args == 'updated':
            page = support.match(data, patron=r'data-page="(\d+)" title="Next">').match
            patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?">\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s\s*(?P<type>[^\s]+)\s*(?P<episode>\d+)'
            typeContentDict = {'Movie':'movie', 'Episodio':'episode'} #item.contentType='episode'
            action = 'findvideos'
            def itemlistHook(itemlist):
                # append a POST-style 'next page' entry when one is advertised
                if page:
                    itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),page= page, thumbnail=support.thumb()))
                return itemlist
        elif 'filter' in item.args:
            page = support.match(data, patron=r'totalPages:\s*(\d+)').match
            patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>[^"(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-Za-z-]+)\))?">\s*<img src="(?P<thumb>[^"]+)"'
            def itemlistHook(itemlist):
                # query-string pagination: track the page counter on the item
                if item.nextpage: item.nextpage += 1
                else: item.nextpage = 2
                if page and item.nextpage < int(page):
                    itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), url= '{}&page={}'.format(item.url, item.nextpage), infoLabels={}, thumbnail=support.thumb()))
                return itemlist

        else:
            # pagination = ''
            if item.args == 'incorso':
                patron = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<(]+)(?:\s*\((?P<year>\d+)\))?(?:\s*\((?P<lang>[A-za-z-]+)\))?</a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]+)<'
            else:
                # debug=True
                patron = r'<img src="(?P<thumb>[^"]+)" alt="(?P<title>[^"\(]+)(?:\((?P<lang>[Ii][Tt][Aa])\))?(?:\s*\((?P<year>\d+)\))?[^"]*"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<a class="[^"]+" href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>\s*<p[^>]+>(?:(?P<plot>[^<]+))?<'

    return locals()
|
||||
|
||||
|
||||
def check(item):
    """Decide movie vs series after peliculas(); movies jump straight to video.

    NOTE(review): when the page declares a Movie but episodios() comes back
    empty, this function returns None — confirm whether callers tolerate that.
    """
    movie = support.match(item, patron=r'Episodi:</b> (\d*) Movie')
    if movie.match:
        episodes = episodios(item)
        if len(episodes) > 0:
            # play the first (only) episode as a movie
            it = episodes[0].clone(contentType = 'movie', contentTitle=item.fulltitle, contentSerieName='')
            return findvideos(it)
    else:
        item.contentType = 'tvshow'
        return episodios(item)
|
||||
|
||||
|
||||
@support.scrape
def episodios(item):
    """Scrape-style episode list (new site layout); decorator consumes locals()."""
    # anime renumbering only applies to series, not movies
    if item.contentType != 'movie': anime = True
    patron = r'episodi-link-button">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*(?P<title>[^\d<]+(?P<episode>\d+))\s*</a>'
    return locals()
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect the primary/alternate direct links plus the mirror servers."""
    support.info()
    itemlist = []
    links = []

    # the 'G' (guarda) button links to the page hosting the primary player
    main_url = support.match(item, patron=r'<a href="([^"]+)">[^>]+>[^>]+>G').match
    # dropdown on that page lists the mirror pages
    urls = support.match(support.match(main_url, headers=headers).data, patron=r'<a class="dropdown-item"\s*href="([^"]+)', headers=headers).matches
    # the player page serves a primary and an alternate ('&s=alt') stream
    itemlist.append(item.clone(action="play", title='Primario', url=main_url, server='directo'))
    itemlist.append(item.clone(action="play", title='Secondario', url=main_url + '&s=alt', server='directo'))
    for url in urls:
        # each mirror page wraps the real link in a download button
        link = support.match(url, patron=r'<a href="([^"]+)"[^>]+><button', headers=headers).match
        if link:
            links.append(link)
    return support.server(item, data=links, itemlist=itemlist)
|
||||
|
||||
|
||||
def play(item):
    """Resolve the direct stream URL for 'directo' items just before playback."""
    if item.server == 'directo':
        # the page embeds the media URL either in a <source src=...> tag
        # or in a JS 'file:' property
        resolved = support.match(item.url, patron=r'(?:source type="[^"]+"\s*src=|file:[^"]+)"([^"]+)').match
        item.url = resolved
    return [item]
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
{
|
||||
"id": "animespace",
|
||||
"name": "AnimeSpace",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [],
|
||||
"thumbnail": "",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"anime",
|
||||
"vos"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"VOSE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Número de enlaces a verificar",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Episodios de anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,263 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel AnimeSpace -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
from specials import renumbertools
|
||||
|
||||
__channel__ = "animespace"
# base URL is centrally configurable (host changes are frequent)
host = config.get_channel_url(__channel__)

# user preferences: whether/how many links to verify before showing them
checklinks = config.get_setting('checklinks', 'animespace')
checklinks_number = config.get_setting('checklinks_number', 'animespace')

# this site only serves original-audio, Spanish-subtitled content
IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the AnimeSpace root menu and register autoplay/renumber options."""
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
                         action="new_episodes",
                         thumbnail=get_thumb('new_episodes', auto=True),
                         url=host))

    itemlist.append(Item(channel=item.channel, title="Ultimas",
                         action="list_all",
                         thumbnail=get_thumb('last', auto=True),
                         url=host + '/emision'))

    itemlist.append(Item(channel=item.channel, title="Todas",
                         action="list_all",
                         thumbnail=get_thumb('all', auto=True),
                         url=host + '/animes'))

    itemlist.append(Item(channel=item.channel, title="Anime",
                         action="list_all",
                         thumbnail=get_thumb('anime', auto=True),
                         url=host + '/categoria/anime'))

    itemlist.append(Item(channel=item.channel, title="Películas",
                         action="list_all",
                         thumbnail=get_thumb('movies', auto=True),
                         url=host + '/categoria/pelicula'))

    itemlist.append(Item(channel=item.channel, title="OVAs",
                         action="list_all",
                         thumbnail='',
                         url=host + '/categoria/ova'))

    itemlist.append(Item(channel=item.channel, title="ONAs",
                         action="list_all",
                         thumbnail='',
                         url=host + '/categoria/ona'))

    itemlist.append(Item(channel=item.channel, title="Especiales",
                         action="list_all",
                         thumbnail='',
                         url=host + '/categoria/especial'))

    itemlist.append(Item(channel=item.channel, title="Buscar",
                         action="search",
                         url=host + '/search?q=',
                         thumbnail=get_thumb('search', auto=True),
                         fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                         ))

    # append the per-channel autoplay and renumbering configuration entries
    autoplay.show_option(item.channel, itemlist)
    itemlist = renumbertools.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def get_source(url):
    """Download *url* and collapse newlines, tabs, &nbsp;, <br> and blank runs."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
|
||||
|
||||
|
||||
def list_all(item):
    """List titles from a catalogue/category page, with pagination.

    NOTE(review): the loop variable ``type`` shadows the builtin — harmless
    here but worth renaming on the next behavioral change.
    """
    logger.info()

    itemlist = []

    data = get_source(item.url)
    # card: url, thumb, title, year ("fecha"), content type label
    patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
        type = type.strip().lower()
        url = scrapedurl
        thumbnail = scrapedthumbnail
        lang = 'VOSE'
        title = scrapedtitle
        # combine the renumbering and autoplay context-menu entries
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        new_item= Item(channel=item.channel,
                       action='episodios',
                       title=title,
                       url=url,
                       thumbnail=thumbnail,
                       language = lang,
                       infoLabels={'year':year}
                       )
        # non-series entries (movies/OVAs/...) are looked up as movie titles
        if type != 'anime':
            new_item.contentTitle=title
        else:
            new_item.plot=type
            new_item.contentSerieName=title
        new_item.context = context
        itemlist.append(new_item)

    # Paginacion
    next_page = scrapertools.find_single_match(data,
                                               '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')

    if next_page != "":
        # next_page is relative; prepend the current URL without its query
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=actual_page + next_page,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
|
||||
|
||||
def search(item, texto):
    """Channel search: spaces become '+', then the regular listing is reused."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    try:
        if texto != '':
            return list_all(item)
        else:
            return []
    # swallow scraping errors so the global search keeps running
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
def new_episodes(item):
    """List the newest episodes from the home page's 'caps' section."""
    logger.info()

    itemlist = []

    full_data = get_source(item.url)
    # restrict matching to the new-episodes carousel only
    data = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
    patron = '<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
        url = scrapedurl
        lang = 'VOSE'
        title = '%s - %s' % (scrapedtitle, epi)
        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
                             action='findvideos', language=lang))

    return itemlist
|
||||
|
||||
def episodios(item):
    """List a series' episodes, applying tracker renumbering.

    NOTE(review): ``infoLabels`` is the same dict object mutated for every
    episode and passed to each Item — if Item does not copy it, all episodes
    end up sharing the last season/episode pair.  Confirm Item's behavior
    before relying on per-episode infoLabels.
    """
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<a class="item" href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
    for scrapedurl in matches:
        # episode number is encoded in the URL slug ('...capitulo-N')
        episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)')
        lang = 'VOSE'
        season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
        title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
        url = scrapedurl
        infoLabels['season'] = season
        infoLabels['episode'] = episode

        itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
                             action='findvideos', language=lang, infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # site lists newest first; show oldest first instead
    itemlist = itemlist[::-1]
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
                 extra1='library'))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the per-option player iframes into playable server links.

    NOTE(review): ``urllib.unquote`` exists only on Python 2 — on Python 3
    this would need ``urllib.parse.unquote``.
    """
    import urllib
    logger.info()

    itemlist = []

    data = get_source(item.url)
    # one iframe per 'Opt<N>' tab
    patron = 'id="Opt\d+">.*?src=(.*?) frameborder'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:
        server = ''
        scrapedurl = scrapedurl.replace('"', '')
        new_data = get_source(scrapedurl)

        if "/stream/" in scrapedurl:
            # self-hosted player: extract the direct media source
            scrapedurl = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
            server = "directo"
        else:
            # external embed: the real link travels URL-encoded in ?url=
            scrapedurl = scrapertools.find_single_match(scrapedurl, '.*?url=([^&]+)?')
            scrapedurl = urllib.unquote(scrapedurl)

        if scrapedurl != '':
            itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play',
                                 language = item.language, infoLabels=item.infoLabels, server=server))

    # fills in server info and expands the '%s' title placeholder
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    if checklinks:
        itemlist = servertools.check_list_links(itemlist, checklinks_number)

    # Requerido para FilterTools

    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
def newest(categoria):
    """Global 'Novedades' hook: only the 'anime' category is served here."""
    if categoria != 'anime':
        return []
    item = Item()
    item.url = host
    return new_episodes(item)
|
||||
@@ -1,37 +0,0 @@
|
||||
{
|
||||
"id": "animesubita",
|
||||
"name": "AnimeSubIta",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "animesubita.png",
|
||||
"bannermenu": "animesubita.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,343 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Ringraziamo Icarus crew
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale per AnimeSubIta
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools, tmdb, support
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
__channel__ = "animesubita"
# base URL is centrally configurable (host changes are frequent)
host = config.get_channel_url(__channel__)
# results per page for client-side pagination
PERPAGE = 20
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def mainlist(item):
    """Build the AnimeSubIta root menu as a single static itemlist."""
    logger.info()
    itemlist = [Item(channel=item.channel,
                     action="lista_anime_completa",
                     title=support.color("Lista Anime", "azure"),
                     url="%s/lista-anime/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="ultimiep",
                     title=support.color("Ultimi Episodi", "azure"),
                     url="%s/category/ultimi-episodi/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="lista_anime",
                     title=support.color("Anime in corso", "azure"),
                     url="%s/category/anime-in-corso/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="categorie",
                     title=support.color("Categorie", "azure"),
                     url="%s/generi/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="search",
                     title=support.color("Cerca anime ...", "yellow"),
                     extra="anime",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
                ]

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def newest(categoria):
    """Global news hook; reuses ultimiep() for the 'anime' category."""
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "anime":
            item.url = host
            item.action = "ultimiep"
            itemlist = ultimiep(item)

            # drop the trailing 'next page' entry, if present
            if itemlist[-1].action == "ultimiep":
                itemlist.pop()
    # Continua l'esecuzione in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search(item, texto):
    """Search the site for *texto* and list the matching anime."""
    logger.info()
    item.url = host + "/?s=" + texto
    try:
        return lista_anime(item)
    # Keep the global search running on error (was a bare except):
    # log the exc_info and return nothing.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def categorie(item):
    """List the genre categories scraped from the genres page."""
    logger.info()

    html = httptools.downloadpage(item.url).data
    category_re = re.compile(r'<li><a title="[^"]+" href="([^"]+)">([^<]+)</a>', re.DOTALL)

    # One browsable item per category link; the word "Anime" is stripped
    # from the label.
    return [
        Item(channel=item.channel,
             action="lista_anime",
             title=name.replace('Anime', '').strip(),
             text_color="azure",
             url=link,
             thumbnail=item.thumbnail,
             folder=True)
        for link, name in category_re.findall(html)
    ]
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def ultimiep(item):
    """Build the "latest episodes" list.

    Reuses lista_anime() without pagination and without language tags,
    then rewrites each item so it points straight at findvideos for the
    specific episode.
    """
    logger.info("ultimiep")
    itemlist = lista_anime(item, False, False)

    for itm in itemlist:
        title = scrapertools.decodeHtmlentities(itm.title)
        # Strip boilerplate words from the scraped title.
        title = title.replace("Streaming", "").replace("&", "")
        title = title.replace("Download", "")
        title = title.replace("Sub Ita", "").strip()
        # Release kind: "Episodio"/"Episodi" or "OAV" (may be empty).
        eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
        # Rewrite the episode URL into the show's streaming page URL.
        url = re.sub(r'%s-?\d*-' % eptype.lower(), '', itm.url)
        if "-streaming" not in url:
            url = url.replace("sub-ita", "sub-ita-streaming")

        epnumber = ""
        if 'episodio' in eptype.lower():
            epnumber = scrapertools.find_single_match(title.lower(), r'episodio?\s*(\d+)')
            eptype += ":? " + epnumber

        # Regex later used by findvideos() to locate this episode's row in
        # the episode table. BUGFIX: made a raw string — it is full of \s
        # escapes that are invalid escape sequences in a plain string.
        extra = r"<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
        itm.title = support.color(title, 'azure').strip()
        itm.action = "findvideos"
        itm.url = url
        itm.fulltitle = cleantitle
        itm.extra = extra
        itm.show = re.sub(r'Episodio\s*', '', title)
        itm.thumbnail = item.thumbnail

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def lista_anime(item, nextpage=True, show_lang=True):
    """List the anime found on item.url.

    nextpage  -- append a pagination item when the page links a next page.
    show_lang -- keep the "(Sub Ita)" language tag in the title.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="post-list group">(.*?)</nav><!--/.pagination-->')
    # patron = r'<a href="([^"]+)" title="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>' # pattern with thumbnail; Kodi cannot download the site's images
    patron = r'<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
        # Title clean-up: drop boilerplate words, keep the language tag aside.
        scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
        scrapedtitle = scrapedtitle.replace("Download", "")
        lang = scrapertools.find_single_match(scrapedtitle, r"([Ss][Uu][Bb]\s*[Ii][Tt][Aa])")
        scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
        eptype = scrapertools.find_single_match(scrapedtitle, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', scrapedtitle)

        cleantitle = cleantitle.replace(lang, "").strip()

        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
                 # BUGFIX: was a bare color(...) call (NameError at runtime);
                 # every other call site in this channel uses support.color.
                 title=support.color(scrapedtitle.replace(lang, "(%s)" % support.color(lang, "red") if show_lang else "").strip(), 'azure'),
                 fulltitle=cleantitle,
                 url=scrapedurl,
                 show=cleantitle,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if nextpage:
        # Follow the <link rel="next"> header for pagination, if present.
        patronvideos = r'<link rel="next" href="([^"]+)"\s*/>'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)

        if len(matches) > 0:
            scrapedurl = matches[0]
            itemlist.append(
                Item(channel=item.channel,
                     action="lista_anime",
                     title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                     folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def lista_anime_completa(item):
    """List the complete anime catalogue, paginated client-side."""
    logger.info()
    itemlist = []

    # The page number is piggybacked on the URL after a '{}' separator.
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)

    html = httptools.downloadpage(item.url).data
    section = scrapertools.find_single_match(html, r'<ul class="lcp_catlist"[^>]+>(.*?)</ul>')
    entries = re.compile(r'<a href="([^"]+)"[^>]+>([^<]+)</a>', re.DOTALL).findall(section)

    # Show only the current page's slice of the full catalogue.
    first = (page - 1) * PERPAGE
    for entry_url, raw_title in entries[first:first + PERPAGE]:
        title = scrapertools.decodeHtmlentities(raw_title.strip())
        clean = title.replace("Sub Ita Streaming", "").replace("Ita Streaming", "")

        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow" if 'oav' not in title.lower() else "movie",
                 title=support.color(title, 'azure'),
                 fulltitle=clean,
                 show=clean,
                 url=entry_url,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # More entries left: add a pager item carrying the next page number.
    if len(entries) >= page * PERPAGE:
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="lista_anime_completa",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=item.url + '{}' + str(page + 1),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def episodi(item):
    """List a show's episodes from its episode table.

    Rows flagged as unavailable (nodownload/nostreaming icons) are
    skipped; VVVVID-hosted entries are listed as unsupported.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # BUGFIX: made a raw string — the pattern contains many \s escapes
    # that are invalid escape sequences in a plain string literal.
    patron = r'<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl, scrapedimg in matches:
        if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
            continue
        if 'vvvvid' in scrapedurl.lower():
            itemlist.append(Item(title='I Video VVVVID Non sono supportati'))
            continue

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Strip any markup left inside the cell.
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=item.title,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))

    # Service entry: allow adding the whole series to the video library.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
    """Resolve the direct video URL(s) for an episode.

    When item.extra is set it holds a regex (built by ultimiep) that
    locates the episode's row in the show page's table; the first link in
    that row replaces item.url before resolution.
    """
    logger.info()
    itemlist = []

    headers = {'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}

    if item.extra:
        data = httptools.downloadpage(item.url, headers=headers).data
        blocco = scrapertools.find_single_match(data, r'%s(.*?)</tr>' % item.extra)
        item.url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]+>')

    # The episode URL embeds a link.animesubita.org redirect: extract the
    # php file name and its query string, then call it on the main host.
    patron = r'http:\/\/link[^a]+animesubita[^o]+org\/[^\/]+\/.*?(episodio\d*)[^p]+php(\?.*)'
    for phpfile, scrapedurl in re.findall(patron, item.url, re.DOTALL):
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
        # Replay the stored session cookies for this host so the stream
        # URL is accepted (cookie jar lines are tab-separated: field 5 is
        # the name, field 6 the value).
        cookies = ""
        matches = re.compile('(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""), re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        scrapedurl = scrapertools.find_single_match(data, r'<source src="([^"]+)"[^>]+>')
        # Kodi URL syntax: media URL + '|' + the request headers to send.
        url = scrapedurl + '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 text_color="azure",
                 title="[%s] %s" % (support.color("Diretto", "orange"), item.title),
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))

    return itemlist
|
||||
@@ -1,36 +0,0 @@
|
||||
{
|
||||
"id": "animetubeita",
|
||||
"name": "Animetubeita",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "http:\/\/i.imgur.com\/rQPx1iQ.png",
|
||||
"bannermenu": "http:\/\/i.imgur.com\/rQPx1iQ.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,364 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale per animetubeita
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import urllib
|
||||
|
||||
from core import httptools, scrapertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
__channel__ = "animetubeita"
# Base site URL, resolved from the per-channel configuration.
host = config.get_channel_url(__channel__)
# Section entry points on the site.
hostlista = host + "/lista-anime/"
hostgeneri = host + "/generi/"
hostcorso = host + "/category/serie-in-corso/"
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu."""
    log("animetubeita", "mainlist", item.channel)

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         action="lista_home",
                         title="[COLOR azure]Home[/COLOR]",
                         url=host,
                         thumbnail=AnimeThumbnail,
                         fanart=AnimeFanart))
    # A-Z listing intentionally disabled (lista_anime is commented out
    # elsewhere in this module).
    itemlist.append(Item(channel=item.channel,
                         action="lista_genere",
                         title="[COLOR azure]Genere[/COLOR]",
                         url=hostgeneri,
                         thumbnail=CategoriaThumbnail,
                         fanart=CategoriaFanart))
    itemlist.append(Item(channel=item.channel,
                         action="lista_in_corso",
                         title="[COLOR azure]Serie in Corso[/COLOR]",
                         url=hostcorso,
                         thumbnail=CategoriaThumbnail,
                         fanart=CategoriaFanart))
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR lime]Cerca...[/COLOR]",
                         url=host + "/?s=",
                         thumbnail=CercaThumbnail,
                         fanart=CercaFanart))
    return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_home(item):
    """List the shows published on the home page, with pagination."""
    log("animetubeita", "lista_home", item.channel)

    itemlist = []

    patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
    for show_url, thumb, raw_title, raw_plot in scrapedAll(item.url, patron):
        # Keep only the part of the title before the "Sub ..." suffix.
        title = scrapertools.decodeHtmlentities(raw_title).split("Sub")[0]
        show_title = re.sub(r'[Ee]pisodio? \d+', '', title)
        plot = scrapertools.decodeHtmlentities(raw_plot)
        itemlist.append(
            Item(channel=item.channel,
                 action="dl_s",
                 contentType="tvshow",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 fulltitle=show_title,
                 url=show_url,
                 thumbnail=thumb,
                 fanart=thumb,
                 show=show_title,
                 plot=plot))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: follow the <link rel="next"> header, if present.
    # ===========================================================
    data = httptools.downloadpage(item.url).data
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_home",
                 title=AvantiTxt,
                 url=next_page,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================
    return itemlist
|
||||
|
||||
|
||||
|
||||
# def lista_anime(item):
|
||||
# log("animetubeita", "lista_anime", item.channel)
|
||||
|
||||
# itemlist = []
|
||||
|
||||
# patron = '<li.*?class="page_.*?href="(.*?)">(.*?)</a></li>'
|
||||
# for scrapedurl, scrapedtitle in scrapedAll(item.url, patron):
|
||||
# title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
# title = title.split("Sub")[0]
|
||||
# log("url:[" + scrapedurl + "] scrapedtitle:[" + title + "]")
|
||||
# itemlist.append(
|
||||
# Item(channel=item.channel,
|
||||
# action="dettaglio",
|
||||
# contentType="tvshow",
|
||||
# title="[COLOR azure]" + title + "[/COLOR]",
|
||||
# url=scrapedurl,
|
||||
# show=title,
|
||||
# thumbnail="",
|
||||
# fanart=""))
|
||||
|
||||
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_genere(item):
    """List the genre categories from the genres page."""
    log("lista_anime_genere", "lista_genere", item.channel)

    data = httptools.downloadpage(item.url).data

    bloque = scrapertools.find_single_match(data,
                                            '<div class="hentry page post-1 odd author-admin clear-block">(.*?)<div id="disqus_thread">')

    patron = '<li class="cat-item cat-item.*?"><a href="(.*?)" >(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)

    # One browsable entry per genre link.
    return [
        Item(channel=item.channel,
             action="lista_generi",
             title='[COLOR lightsalmon][B]' + genre_title + '[/B][/COLOR]',
             url=genre_url,
             fulltitle=genre_title,
             show=genre_title,
             thumbnail=item.thumbnail)
        for genre_url, genre_title in matches
    ]
|
||||
|
||||
|
||||
|
||||
def lista_generi(item):
    """List the shows belonging to a genre page, with pagination."""
    log("animetubeita", "lista_generi", item.channel)

    itemlist = []
    patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
    for show_url, thumb, raw_title, raw_plot in scrapedAll(item.url, patron):
        # Keep only the part of the title before the "Sub ..." suffix.
        title = scrapertools.decodeHtmlentities(raw_title).split("Sub")[0]
        show_title = re.sub(r'[Ee]pisodio? \d+', '', title)
        plot = scrapertools.decodeHtmlentities(raw_plot)
        itemlist.append(
            Item(channel=item.channel,
                 action="dettaglio",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 contentType="tvshow",
                 fulltitle=show_title,
                 url=show_url,
                 thumbnail=thumb,
                 show=show_title,
                 fanart=thumb,
                 plot=plot))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: follow the <link rel="next"> header, if present.
    # ===========================================================
    data = httptools.downloadpage(item.url).data
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_generi",
                 title=AvantiTxt,
                 url=next_page,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================

    return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_in_corso(item):
    """List the currently-airing shows, with pagination."""
    # BUGFIX: log label was copy-pasted as "lista_home".
    log("animetubeita", "lista_in_corso", item.channel)

    itemlist = []

    patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title="Link.*?>(.*?)</a></h2>.*?<img.*?src="(.*?)".*?<td><strong>Trama</strong></td>.*?<td>(.*?)</td>'
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in scrapedAll(item.url, patron):
        # Keep only the part of the title before the "Sub ..." suffix.
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title = title.split("Sub")[0]
        fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        itemlist.append(
            Item(channel=item.channel,
                 action="dettaglio",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 contentType="tvshow",
                 fulltitle=fulltitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 show=fulltitle,
                 fanart=scrapedthumbnail,
                 plot=scrapedplot))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: follow the <link rel="next"> header, if present.
    # ===========================================================
    data = httptools.downloadpage(item.url).data
    patron = '<link rel="next" href="(.*?)"'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_in_corso",
                 title=AvantiTxt,
                 url=next_page,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================
    return itemlist
|
||||
|
||||
|
||||
|
||||
def dl_s(item):
    """Collect the "DOWNLOAD & STREAMING" link(s) from a show page.

    Two patterns are tried in order — the second is a looser fallback —
    and `encontrados` de-duplicates URLs matched by both. (The two
    previously-duplicated loop bodies are now merged.)
    """
    log("animetubeita", "dl_s", item.channel)

    itemlist = []
    encontrados = set()

    patterns = ('<p><center><a.*?href="(.*?)"',        # 1: strict form
                '<p><center>.*?<a.*?href="(.*?)"')     # 2: looser fallback
    for patron in patterns:
        for scrapedurl in scrapedAll(item.url, patron):
            if scrapedurl in encontrados:
                continue
            encontrados.add(scrapedurl)
            title = "DOWNLOAD & STREAMING"
            itemlist.append(Item(channel=item.channel,
                                 action="dettaglio",
                                 title="[COLOR azure]" + title + "[/COLOR]",
                                 url=scrapedurl,
                                 thumbnail=item.thumbnail,
                                 fanart=item.thumbnail,
                                 plot=item.plot,
                                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def dettaglio(item):
    """Build one playable item per episode found on a show's detail page."""
    log("animetubeita", "dettaglio", item.channel)

    itemlist = []
    headers = {'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}

    # Episodes are numbered by position; the page gives no explicit number.
    episodio = 1
    patron = r'<a href="http:\/\/link[^a]+animetubeita[^c]+com\/[^\/]+\/[^s]+((?:stream|strm))[^p]+php(\?.*?)"'
    for phpfile, scrapedurl in scrapedAll(item.url, patron):
        title = "Episodio " + str(episodio)
        episodio += 1
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
        # Replay the stored session cookies so the stream URL is accepted
        # (cookie jar lines are tab-separated: field 5 name, field 6 value).
        cookies = ""
        matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        url = scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
        # Kodi URL syntax: media URL + '|' + the request headers to send.
        url += '|' + urllib.urlencode(headers)
        itemlist.append(Item(channel=item.channel,
                             action="play",
                             title="[COLOR azure]" + title + "[/COLOR]",
                             url=url,
                             thumbnail=item.thumbnail,
                             fanart=item.thumbnail,
                             plot=item.plot))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and list the results via lista_home."""
    log("animetubeita", "search", item.channel)
    item.url = item.url + texto

    try:
        return lista_home(item)
    # Keep the global search running on error (was a bare except):
    # log the exc_info and return nothing.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
|
||||
def scrapedAll(url="", patron=""):
    """Download *url* and return every match of *patron* (DOTALL mode)."""
    page = httptools.downloadpage(url).data
    found = re.compile(patron, re.DOTALL).findall(page)
    scrapertools.printMatches(found)
    return found
|
||||
|
||||
|
||||
|
||||
def scrapedSingle(url="", single="", patron=""):
    """Download *url*, narrow to the first *single* match, then findall *patron*."""
    page = httptools.downloadpage(url).data
    fragment = scrapertools.find_single_match(page, single)
    found = re.compile(patron, re.DOTALL).findall(fragment)
    scrapertools.printMatches(found)
    return found
|
||||
|
||||
|
||||
|
||||
def log(funzione="", stringa="", canale=""):
    """Debug-log in the channel's "[canale].[funzione] stringa" format."""
    logger.debug("[%s].[%s] %s" % (canale, funzione, stringa))
|
||||
|
||||
|
||||
|
||||
# Artwork and pagination assets referenced by the menu builders above.
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
# "Next page" label (localized) and pager icon.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
20
channels/animeunity.json
Executable file
20
channels/animeunity.json
Executable file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"id": "animeunity",
|
||||
"name": "AnimeUnity",
|
||||
"active": true,
|
||||
"language": ["ita", "sub-ita"],
|
||||
"thumbnail": "animeunity.png",
|
||||
"banner": "animeunity.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "order",
|
||||
"type": "list",
|
||||
"label": "Ordine di Visualizzazione",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
|
||||
}
|
||||
]
|
||||
}
|
||||
268
channels/animeunity.py
Executable file
268
channels/animeunity.py
Executable file
@@ -0,0 +1,268 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per AnimeUnity
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import cloudscraper, json, copy, inspect
|
||||
from core import jsontools, support, httptools, scrapertools
|
||||
from platformcode import autorenumber
|
||||
|
||||
# support.dbg()
host = support.config.get_channel_url()
# NOTE(review): the request below runs at import time, so merely loading
# this module performs network I/O — confirm this is intended.
response = httptools.downloadpage(host + '/archivio')
# CSRF token and session cookies are scraped once from the archive page
# and reused as default headers for the site's JSON API calls.
csrf_token = support.match(response.data, patron='name="csrf-token" content="([^"]+)"').match
headers = {'content-type': 'application/json;charset=UTF-8',
           'x-csrf-token': csrf_token,
           'Cookie' : '; '.join([x.name + '=' + x.value for x in response.cookies])}
|
||||
|
||||
|
||||
@support.menu
def mainlist(item):
    # NOTE: @support.menu consumes this function's locals() — the local
    # variable names (top, menu, search) form the menu schema, so they
    # must not be renamed.
    top = [('Ultimi Episodi', ['', 'news'])]

    menu = [('Anime {bullet bold}',['', 'menu', {}, 'tvshow']),
            ('Film {submenu}',['', 'menu', {'type': 'Movie'}]),
            ('TV {submenu}',['', 'menu', {'type': 'TV'}, 'tvshow']),
            ('OVA {submenu} {tv}',['', 'menu', {'type': 'OVA'}, 'tvshow']),
            ('ONA {submenu} {tv}',['', 'menu', {'type': 'ONA'}, 'tvshow']),
            ('Special {submenu} {tv}',['', 'menu', {'type': 'Special'}, 'tvshow'])]
    search =''
    return locals()
|
||||
|
||||
def menu(item):
    """Build the submenu (All / ITA / Genre / Year [/ status]) for a section."""
    item.action = 'peliculas'

    def args_with(**extra):
        # Shallow-copy the section args and overlay the given filter.
        filtered = copy.copy(item.args)
        filtered.update(extra)
        return filtered

    itemlist = [item.clone(title=support.typo('Tutti', 'bold')),
                item.clone(title=support.typo('ITA', 'bold'), args=args_with(title='(ita)')),
                item.clone(title=support.typo('Genere', 'bold'), action='genres'),
                item.clone(title=support.typo('Anno', 'bold'), action='years')]
    if item.contentType == 'tvshow':
        itemlist += [item.clone(title=support.typo('In Corso', 'bold'), args=args_with(status='In Corso')),
                     item.clone(title=support.typo('Terminato', 'bold'), args=args_with(status='Terminato'))]
    itemlist += [item.clone(title=support.typo('Cerca...', 'bold'), action='search', thumbnail=support.thumb('search'))]
    return itemlist
|
||||
|
||||
|
||||
def genres(item):
    """List genre filters parsed from the archive page's genres attribute."""
    support.info()
    # support.dbg()
    itemlist = []

    # NOTE(review): the .replace below is reproduced as-is and looks like a
    # no-op — an '&quot;' literal may have been lost in transit; verify.
    genre_list = json.loads(support.match(response.data, patron='genres="([^"]+)').match.replace('"','"'))

    for genre in genre_list:
        item.args['genres'] = [genre]
        itemlist.append(item.clone(title=support.typo(genre['name'],'bold'), action='peliculas'))
    return support.thumb(itemlist)
|
||||
|
||||
def years(item):
    """List year filters from next year down to the site's oldest entry."""
    support.info()
    itemlist = []

    from datetime import datetime
    upper = datetime.today().year + 1
    oldest = int(support.match(response.data, patron='anime_oldest_date="([^"]+)').match)

    # Newest first: upper (next year) down to the oldest year, inclusive.
    for year in range(upper, oldest - 1, -1):
        item.args['year'] = year
        itemlist.append(item.clone(title=support.typo(year, 'bold'), action='peliculas'))
    return itemlist
|
||||
|
||||
|
||||
def search(item, text):
    """Search the archive API for *text*; returns [] on any scraping error."""
    support.info('search', item)
    # args may be unset when search is entered from the root menu.
    if not item.args:
        item.args = {'title':text}
    else:
        item.args['title'] = text
    item.search = text

    try:
        return peliculas(item)
    # Keep the global search running on error (was a bare except):
    # log the exc_info and return nothing.
    except Exception:
        import sys
        for line in sys.exc_info():
            support.info('search log:', line)
        return []
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the latest episodes for the global "news" feature.

    Any scraping failure is logged and an empty list returned so the
    aggregated news view keeps working.
    """
    support.info(categoria)
    itemlist = []
    item = support.Item()
    item.url = host

    try:
        itemlist = news(item)

        # Drop the trailing "next page" pager item, if present.
        # Guard against an empty result so this never raises IndexError.
        if itemlist and itemlist[-1].action == 'news':
            itemlist.pop()
    # Keep running on error (was a bare except): log and return nothing.
    except Exception:
        import sys
        for line in sys.exc_info():
            support.info(line)
        return []

    return itemlist
|
||||
|
||||
def news(item):
    """List the latest episodes from the home page's items-json attribute."""
    support.info()
    item.contentType = 'episode'
    itemlist = []

    # NOTE(review): the .replace below is reproduced as-is and looks like a
    # no-op — an '&quot;' literal may have been lost in transit; verify.
    fullJs = json.loads(support.match(httptools.downloadpage(item.url).data, headers=headers, patron=r'items-json="([^"]+)"').match.replace('"','"'))
    js = fullJs['data']

    for it in js:
        if it.get('anime', {}).get('title') or it.get('anime', {}).get('title_eng'):
            title_name = it['anime']['title'] if it.get('anime', {}).get('title') else it['anime']['title_eng']
            # Try "S01E02"-style file names first...
            pattern = r'[sS](?P<season>\d+)[eE](?P<episode>\d+)'
            match = scrapertools.find_single_match(it['file_name'], pattern)
            full_episode = ''
            if match:
                # find_single_match returns the two groups as a tuple here.
                season, episode = match
                full_episode = ' - S' + season + ' E' + episode
            else:
                # ...then fall back to ".Ep.02"-style file names.
                pattern = r'[._\s]Ep[._\s]*(?P<episode>\d+)'
                episode = scrapertools.find_single_match(it['file_name'], pattern)
                if episode:
                    full_episode = ' - E' + episode
            itemlist.append(
                item.clone(title = support.typo(title_name + full_episode, 'bold'),
                           fulltitle = it['anime']['title'],
                           thumbnail = it['anime']['imageurl'],
                           forcethumb = True,
                           scws_id = it.get('scws_id', ''),
                           url = '{}/anime/{}-{}'.format(item.url, it['anime']['id'],it['anime']['slug']),
                           plot = it['anime']['plot'],
                           action = 'findvideos')
            )
    # Pager item pointing at the API's next page, when one exists.
    if 'next_page_url' in fullJs:
        itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),thumbnail=support.thumb(), url=fullJs['next_page_url']))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
support.info()
|
||||
itemlist = []
|
||||
|
||||
page = item.page if item.page else 0
|
||||
item.args['offset'] = page * 30
|
||||
|
||||
order = support.config.get_setting('order', item.channel)
|
||||
if order:
|
||||
order_list = [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
|
||||
item.args['order'] = order_list[order]
|
||||
|
||||
payload = json.dumps(item.args)
|
||||
records = httptools.downloadpage(host + '/archivio/get-animes', headers=headers, post=payload).json['records']
|
||||
# support.dbg()
|
||||
|
||||
for it in records:
|
||||
if not it['title']:
|
||||
it['title'] = ''
|
||||
lang = support.match(it['title'], patron=r'\(([It][Tt][Aa])\)').match
|
||||
title = support.re.sub(r'\s*\([^\)]+\)', '', it['title'])
|
||||
|
||||
if 'ita' in lang.lower(): language = 'ITA'
|
||||
else: language = 'Sub-ITA'
|
||||
|
||||
if title:
|
||||
itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else ''))
|
||||
else:
|
||||
itm = item.clone(title=support.typo(it['title_eng'],'bold') + support.typo(language,'_ [] color kod'))
|
||||
itm.contentLanguage = language
|
||||
itm.type = it['type']
|
||||
itm.thumbnail = it['imageurl']
|
||||
itm.plot = it['plot']
|
||||
itm.url = '{}/anime/{}-{}'.format(item.url, it.get('id'), it.get('slug'))
|
||||
|
||||
if it['episodes_count'] == 1:
|
||||
itm.contentType = 'movie'
|
||||
itm.fulltitle = itm.show = itm.contentTitle = title
|
||||
itm.contentSerieName = ''
|
||||
itm.action = 'findvideos'
|
||||
itm.scws_id = it['episodes'][0].get('scws_id', '')
|
||||
# itm.video_url = it['episodes'][0].get('link', '')
|
||||
|
||||
else:
|
||||
itm.contentType = 'tvshow'
|
||||
itm.contentTitle = ''
|
||||
itm.fulltitle = itm.show = itm.contentSerieName = title
|
||||
itm.action = 'episodios'
|
||||
itm.episodes = it['episodes'] if 'episodes' in it else it.get('scws_id', '')
|
||||
|
||||
itemlist.append(itm)
|
||||
|
||||
autorenumber.start(itemlist)
|
||||
if len(itemlist) >= 30:
|
||||
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), thumbnail=support.thumb(), page=page + 1))
|
||||
|
||||
return itemlist
|
||||
|
||||
def episodios(item):
|
||||
support.info()
|
||||
itemlist = []
|
||||
title = 'Parte' if item.type.lower() == 'movie' else 'Episodio'
|
||||
for it in item.episodes:
|
||||
itemlist.append(
|
||||
item.clone(title=support.typo('{}. {} {}'.format(it['number'], title, it['number']), 'bold'),
|
||||
episode = it['number'],
|
||||
fulltitle=item.title,
|
||||
show=item.title,
|
||||
contentTitle='',
|
||||
contentSerieName=item.contentSerieName,
|
||||
thumbnail=item.thumbnail,
|
||||
plot=item.plot,
|
||||
action='findvideos',
|
||||
contentType='episode',
|
||||
url = '{}/{}'.format(item.url, it['id'])
|
||||
)
|
||||
# video_url=it.get('link', ''))
|
||||
)
|
||||
|
||||
if inspect.stack(0)[1][3] not in ['find_episodes']:
|
||||
autorenumber.start(itemlist, item)
|
||||
support.videolibrary(itemlist, item)
|
||||
support.download(itemlist, item)
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
# if item.scws_id:
|
||||
# from time import time
|
||||
# from base64 import b64encode
|
||||
# from hashlib import md5
|
||||
#
|
||||
# client_ip = support.httptools.downloadpage('http://ip-api.com/json/').json.get('query')
|
||||
#
|
||||
# expires = int(time() + 172800)
|
||||
# token = b64encode(md5('{}{} Yc8U6r8KjAKAepEA'.format(expires, client_ip).encode('utf-8')).digest()).decode('utf-8').replace('=', '').replace('+', '-').replace('/', '_')
|
||||
#
|
||||
# url = 'https://scws.work/master/{}?token={}&expires={}&n=1'.format(item.scws_id, token, expires)
|
||||
#
|
||||
# itemlist = [item.clone(title=support.config.get_localized_string(30137), url=url, server='directo', action='play')]
|
||||
|
||||
from core import channeltools
|
||||
itemlist = [item.clone(title=channeltools.get_channel_parameters(item.channel)['title'],
|
||||
url=item.url, server='streamingcommunityws')]
|
||||
return support.server(item, itemlist=itemlist, referer=False)
|
||||
|
||||
# return support.server(item, itemlist=itemlist)
|
||||
|
||||
#
|
||||
# def play(item):
|
||||
# urls = list()
|
||||
# info = support.match(item.url, patron=r'(http.*?rendition=(\d+)[^\s]+)').matches
|
||||
#
|
||||
# if info:
|
||||
# for url, res in info:
|
||||
# urls.append(['hls [{}]'.format(res), url])
|
||||
# return urls
|
||||
21
channels/animeuniverse.json
Executable file
21
channels/animeuniverse.json
Executable file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"id": "animeuniverse",
|
||||
"name": "AnimeHDitalia",
|
||||
"active": false,
|
||||
"language": ["ita", "sub-ita"],
|
||||
"thumbnail": "animeuniverse.png",
|
||||
"banner": "animeuniverse.png",
|
||||
"categories": ["anime", "sub-ita"],
|
||||
"default_off": ["include_in_newest"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "perpage",
|
||||
"type": "list",
|
||||
"label": "Elementi per pagina",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["20","30","40","50","60","70","80","90","100"]
|
||||
}
|
||||
]
|
||||
}
|
||||
129
channels/animeuniverse.py
Executable file
129
channels/animeuniverse.py
Executable file
@@ -0,0 +1,129 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per animeuniverse
|
||||
# ----------------------------------------------------------
|
||||
|
||||
from core import support
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
headers = {}
|
||||
|
||||
perpage_list = ['20','30','40','50','60','70','80','90','100']
|
||||
perpage = perpage_list[support.config.get_setting('perpage' , 'animeuniverse')]
|
||||
epPatron = r'<td>\s*(?P<title>[^<]+)[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img [^>]+Streaming'
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
anime=['/anime/',
|
||||
('Tipo',['', 'menu', 'Anime']),
|
||||
('Anno',['', 'menu', 'Anno']),
|
||||
('Genere', ['', 'menu','Genere']),
|
||||
('Ultimi Episodi',['/2/', 'peliculas', 'last']),
|
||||
('Hentai', ['/hentai/', 'peliculas'])]
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def menu(item):
|
||||
action = 'peliculas'
|
||||
patronBlock = item.args + r'</a>\s*<ul class="sub-menu">(?P<block>.*?)</ul>'
|
||||
patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
support.info(texto)
|
||||
item.search = texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
support.info(categoria)
|
||||
item = support.Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = host
|
||||
item.args = "last"
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
query = ''
|
||||
if '/mos/' in item.url:
|
||||
item.contentType = 'movie'
|
||||
action='findvideos'
|
||||
elif item.args == 'last':
|
||||
query='cat%5D=1¤tquery%5Bcategory__not_in%5D%5B'
|
||||
searchtext=''
|
||||
item.contentType = 'episode'
|
||||
action='findvideos'
|
||||
else:
|
||||
item.contentType = 'tvshow'
|
||||
action='episodios'
|
||||
if item.search:
|
||||
query = 's'
|
||||
searchtext = item.search
|
||||
if not query:
|
||||
query='category_name'
|
||||
searchtext = item.url.split('/')[-2] if item.url != host else ''
|
||||
if not item.pag: item.pag = 1
|
||||
|
||||
anime=True
|
||||
# blacklist=['Altri Hentai']
|
||||
data = support.match(host + '/wp-content/themes/animeuniverse/functions/ajax.php', post='sorter=recent&location=&loop=main+loop&action=sort&numarticles='+perpage+'&paginated='+str(item.pag)+'¤tquery%5B'+query+'%5D='+searchtext+'&thumbnail=1').data.replace('\\','')
|
||||
patron=r'<a href="(?P<url>[^"]+)"><img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)" class="[^"]+" alt="" title="(?P<title>.*?)\s*(?P<lang>Sub ITA|ITA)?(?:"| \[)'
|
||||
|
||||
def itemlistHook(itemlist):
|
||||
if len(itemlist) == int(perpage):
|
||||
item.pag += 1
|
||||
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), action='peliculas'))
|
||||
return itemlist
|
||||
return locals()
|
||||
|
||||
|
||||
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
anime = True
|
||||
pagination = int(perpage)
|
||||
patron = epPatron
|
||||
# debug = True
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
itemlist = []
|
||||
if item.contentType == 'movie':
|
||||
matches = support.match(item, patron=epPatron).matches
|
||||
for title, url in matches:
|
||||
get_video_list(url, title, itemlist)
|
||||
else:
|
||||
get_video_list(item.url, support.config.get_localized_string(30137), itemlist)
|
||||
return support.server(item, itemlist=itemlist)
|
||||
|
||||
|
||||
def get_video_list(url, title, itemlist):
|
||||
from requests import get
|
||||
if not url.startswith('http'): url = host + url
|
||||
|
||||
url = support.match(get(url).url, string=True, patron=r'file=([^$]+)').match
|
||||
if 'http' not in url: url = 'http://' + url
|
||||
itemlist.append(support.Item(title=title, url=url, server='directo', action='play'))
|
||||
|
||||
return itemlist
|
||||
77
channels/animeworld.json
Normal file → Executable file
77
channels/animeworld.json
Normal file → Executable file
@@ -1,70 +1,29 @@
|
||||
{
|
||||
"id": "animeworld",
|
||||
"name": "AnimeWorld",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"id": "animeworld",
|
||||
"name": "AnimeWorld",
|
||||
"active": true,
|
||||
"language": ["ita", "sub-ita"],
|
||||
"thumbnail": "animeworld.png",
|
||||
"banner": "animeworld.png",
|
||||
"categories": ["anime"],
|
||||
"categories": ["anime", "vos"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "channel_host",
|
||||
"type": "text",
|
||||
"label": "Host del canale",
|
||||
"default": "https://www.animeworld.it",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"id": "lang",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"label": "Lingua di Ricerca",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["No filtrar","Italiano"]
|
||||
}
|
||||
"lvalues": [ "Tutte", "Ita", "Sub-Ita"]
|
||||
},
|
||||
{
|
||||
"id": "order",
|
||||
"type": "list",
|
||||
"label": "Ordine di Visualizzazione",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [ "Standard", "Ultime Aggiunte", "Lista A-Z", "Lista A-Z", "Più Vecchi", "Più Recenti", "Più Visti" ]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
431
channels/animeworld.py
Normal file → Executable file
431
channels/animeworld.py
Normal file → Executable file
@@ -1,315 +1,196 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per animeworld
|
||||
# thanks to fatshotty
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import time
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertoolsV2, servertools, tmdb, support, jsontools
|
||||
from core.support import log
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay, autorenumber
|
||||
from core import httptools, support, config, jsontools
|
||||
|
||||
__channel__ = "animeworld"
|
||||
host = config.get_channel_url(__channel__)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
IDIOMAS = {'Italiano': 'Italiano'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
|
||||
list_quality = ['default', '480p', '720p', '1080p']
|
||||
host = support.config.get_channel_url()
|
||||
__channel__ = 'animeworld'
|
||||
cookie = support.config.get_setting('cookie', __channel__)
|
||||
headers = [['Cookie', cookie]]
|
||||
|
||||
|
||||
def get_cookie(data):
|
||||
global cookie, headers
|
||||
cookie = support.match(data, patron=r'document.cookie="([^\s]+)').match
|
||||
support.config.set_setting('cookie', cookie, __channel__)
|
||||
headers = [['Cookie', cookie]]
|
||||
|
||||
|
||||
def get_data(item):
|
||||
# support.dbg()
|
||||
url = httptools.downloadpage(item.url, headers=headers, follow_redirects=True, only_headers=True).url
|
||||
data = support.match(url, headers=headers, follow_redirects=True).data
|
||||
if 'SecurityAW' in data:
|
||||
get_cookie(data)
|
||||
data = get_data(item)
|
||||
return data
|
||||
|
||||
|
||||
def order():
|
||||
# Seleziona l'ordinamento dei risultati
|
||||
return str(support.config.get_setting("order", __channel__))
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
log()
|
||||
|
||||
itemlist =[]
|
||||
|
||||
support.menu(itemlist, 'ITA submenu bold', 'build_menu', host + '/filter?', args=["anime", 'language[]=1'])
|
||||
support.menu(itemlist, 'Sub-ITA submenu bold', 'build_menu', host + '/filter?', args=["anime", 'language[]=0'])
|
||||
support.menu(itemlist, 'Archivio A-Z submenu', 'alfabetico', host+'/az-list', args=["tvshow","a-z"])
|
||||
support.menu(itemlist, 'In corso submenu', 'video', host+'/', args=["in sala"])
|
||||
support.menu(itemlist, 'Generi submenu', 'generi', host+'/')
|
||||
support.menu(itemlist, 'Ultimi Aggiunti bold', 'video', host+'/newest', args=["anime"])
|
||||
support.menu(itemlist, 'Ultimi Episodi bold', 'video', host+'/updated', args=["novita'"])
|
||||
support.menu(itemlist, 'Cerca...', 'search')
|
||||
support.aplay(item, itemlist, list_servers, list_quality)
|
||||
support.channel_config(item, itemlist)
|
||||
return itemlist
|
||||
|
||||
# Crea menu dei generi =================================================
|
||||
|
||||
def generi(item):
|
||||
log()
|
||||
patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
|
||||
patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
|
||||
|
||||
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video')
|
||||
anime=['/filter?sort=',
|
||||
('ITA',['/filter?dub=1&sort=', 'menu', 'dub=1']),
|
||||
('SUB-ITA',['/filter?dub=0&sort=', 'menu', 'dub=0']),
|
||||
('In Corso', ['/ongoing', 'peliculas','noorder']),
|
||||
('Ultimi Episodi', ['/updated', 'peliculas', 'updated']),
|
||||
('Nuove Aggiunte',['/newest', 'peliculas','noorder' ]),
|
||||
('Generi',['/?d=1','genres',])]
|
||||
return locals()
|
||||
|
||||
|
||||
# Crea Menu Filtro ======================================================
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
action = 'peliculas'
|
||||
data = get_data(item)
|
||||
|
||||
def build_menu(item):
|
||||
log()
|
||||
itemlist = []
|
||||
support.menu(itemlist, 'Tutti bold submenu', 'video', item.url+item.args[1])
|
||||
matches, data = support.match(item,r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>',r'<form class="filters.*?>(.*?)<\/form>')
|
||||
log('ANIME DATA =' ,data)
|
||||
for title, html in matches:
|
||||
if title not in 'Lingua Ordine':
|
||||
support.menu(itemlist, title + ' submenu bold', 'build_sub_menu', html, args=item.args)
|
||||
log('ARGS= ', item.args[0])
|
||||
log('ARGS= ', html)
|
||||
return itemlist
|
||||
patronBlock = r'dropdown[^>]*>\s*Generi\s*<span.[^>]+>(?P<block>.*?)</ul>'
|
||||
patronMenu = r'<input.*?name="(?P<name>[^"]+)" value="(?P<value>[^"]+)"\s*>[^>]+>(?P<title>[^<]+)</label>'
|
||||
|
||||
# Crea SottoMenu Filtro ======================================================
|
||||
def itemHook(item):
|
||||
item.url = host + '/filter?' + item.name + '=' + item.value + '&sort='
|
||||
return item
|
||||
return locals()
|
||||
|
||||
def build_sub_menu(item):
|
||||
log()
|
||||
itemlist = []
|
||||
matches = re.compile(r'<input.*?name="([^"]+)" value="([^"]+)"\s*>[^>]+>([^<]+)<\/label>', re.DOTALL).findall(item.url)
|
||||
for name, value, title in matches:
|
||||
support.menu(itemlist, support.typo(title, 'bold'), 'video', host + '/filter?' + '&' + name + '=' + value + '&' + item.args[1])
|
||||
return itemlist
|
||||
|
||||
# Novità ======================================================
|
||||
@support.scrape
|
||||
def menu(item):
|
||||
action = 'submenu'
|
||||
data = get_data(item)
|
||||
patronMenu=r'<button[^>]+>\s*(?P<title>[A-Za-z0-9]+)\s*<span.[^>]+>(?P<other>.*?)</ul>'
|
||||
def itemlistHook(itemlist):
|
||||
itemlist.insert(0, item.clone(title=support.typo('Tutti','bold'), action='peliculas'))
|
||||
itemlist.append(item.clone(title=support.typo('Cerca...','bold'), action='search', search=True, thumbnail=support.thumb('search.png')))
|
||||
return itemlist
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def submenu(item):
|
||||
action = 'peliculas'
|
||||
data = item.other
|
||||
# debug=True
|
||||
patronMenu = r'<input.*?name="(?P<name>[^"]+)" value="(?P<value>[^"]+)"\s*>[^>]+>(?P<title>[^<]+)<\/label>'
|
||||
def itemHook(item):
|
||||
item.url = '{}/filter?{}={}&{}{}'.format(host, item.name, item.value, item.args, ('&sort=' if item.name != 'sort' else ''))
|
||||
return item
|
||||
return locals()
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
log()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
support.info(categoria)
|
||||
item = support.Item()
|
||||
lang = config.get_setting('lang', channel=item.channel)
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = host + '/newest'
|
||||
item.action = "video"
|
||||
itemlist = video(item)
|
||||
|
||||
if itemlist[-1].action == "video":
|
||||
itemlist.pop()
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# Cerca ===========================================================
|
||||
|
||||
def search(item, texto):
|
||||
log(texto)
|
||||
item.url = host + '/search?keyword=' + texto
|
||||
try:
|
||||
return video(item)
|
||||
item.url = host
|
||||
item.args = "updated"
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
support.logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
|
||||
# Lista A-Z ====================================================
|
||||
|
||||
def alfabetico(item):
|
||||
return support.scrape(item, '<a href="([^"]+)" title="([^"]+)">', ['url', 'title'], patron_block=r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>', action='lista_anime')
|
||||
|
||||
|
||||
def lista_anime(item):
|
||||
log()
|
||||
itemlist = []
|
||||
matches ,data = support.match(item, r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>')
|
||||
for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:
|
||||
|
||||
if scrapedoriginal == scrapedtitle:
|
||||
scrapedoriginal=''
|
||||
else:
|
||||
scrapedoriginal = support.typo(scrapedoriginal,' -- []')
|
||||
|
||||
year = ''
|
||||
lang = ''
|
||||
infoLabels = {}
|
||||
if '(' in scrapedtitle:
|
||||
year = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([0-9]+\))')
|
||||
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([a-zA-Z]+\))')
|
||||
|
||||
infoLabels['year'] = year
|
||||
title = scrapedtitle.replace(year,'').replace(lang,'').strip()
|
||||
original = scrapedoriginal.replace(year,'').replace(lang,'').strip()
|
||||
if lang: lang = support.typo(lang,'_ color kod')
|
||||
longtitle = '[B]' + title + '[/B]' + lang + original
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
contentType="episode",
|
||||
action="episodios",
|
||||
title=longtitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumb,
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
infoLabels=infoLabels,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
autorenumber.renumber(itemlist)
|
||||
|
||||
# Next page
|
||||
support.nextPage(itemlist, item, data, r'<a class="page-link" href="([^"]+)" rel="next"')
|
||||
|
||||
return itemlist
|
||||
def search(item, text):
|
||||
support.info(text)
|
||||
if item.search:
|
||||
item.url = '{}/filter?{}&keyword={}&sort='.format(host, item.args, text)
|
||||
else:
|
||||
lang = ['?', '?dub=1&', '?dub=0&'][config.get_setting('lang', channel=item.channel)]
|
||||
item.url = '{}/filter{}&keyword={}&sort='.format(host, lang, text)
|
||||
item.contentType = 'tvshow'
|
||||
try:
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def video(item):
|
||||
log()
|
||||
itemlist = []
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
data = get_data(item)
|
||||
anime = True
|
||||
if item.args not in ['noorder', 'updated'] and not item.url[-1].isdigit(): item.url += order() # usa l'ordinamento di configura canale
|
||||
data = get_data(item)
|
||||
|
||||
matches, data = support.match(item, r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>', headers=headers)
|
||||
if item.args == 'updated':
|
||||
item.contentType='episode'
|
||||
patron=r'<div class="inner">\s*<a href="(?P<url>[^"]+)" class[^>]+>\s*<img.*?src="(?P<thumb>[^"]+)" alt?="(?P<title>[^\("]+)(?:\((?P<lang>[^\)]+)\))?"[^>]+>[^>]+>\s*(?:<div class="[^"]+">(?P<type>[^<]+)</div>)?(?:[^>]+>){2,4}\s*<div class="ep">[^\d]+(?P<episode>\d+)[^<]*</div>'
|
||||
action='findvideos'
|
||||
else:
|
||||
patron= r'<div class="inner">\s*<a href="(?P<url>[^"]+)" class[^>]+>\s*<img.*?src="(?P<thumb>[^"]+)" alt?="(?P<title>[^\("]+)(?:\((?P<year>\d+)\) )?(?:\((?P<lang>[^\)]+)\))?(?P<title2>[^"]+)?[^>]+>[^>]+>(?:\s*<div class="(?P<l>[^"]+)">[^>]+>)?\s*(?:<div class="[^"]+">(?P<type>[^<]+)</div>)?'
|
||||
action='episodios'
|
||||
|
||||
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
|
||||
# Cerca Info come anno o lingua nel Titolo
|
||||
year = ''
|
||||
lang = ''
|
||||
if '(' in scrapedtitle:
|
||||
year = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([0-9]+\))')
|
||||
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([a-zA-Z]+\))')
|
||||
|
||||
# Rimuove Anno e Lingua nel Titolo
|
||||
title = scrapedtitle.replace(year,'').replace(lang,'').strip()
|
||||
original = scrapedoriginal.replace(year,'').replace(lang,'').strip()
|
||||
|
||||
# Compara Il Titolo con quello originale
|
||||
if original == title:
|
||||
original=''
|
||||
else:
|
||||
original = support.typo(scrapedoriginal,'-- []')
|
||||
|
||||
# cerca info supplementari
|
||||
ep = ''
|
||||
ep = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ep">(.*?)<')
|
||||
if ep != '':
|
||||
ep = ' - ' + ep
|
||||
|
||||
ova = ''
|
||||
ova = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ova">(.*?)<')
|
||||
if ova != '':
|
||||
ova = ' - (' + ova + ')'
|
||||
|
||||
ona = ''
|
||||
ona = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ona">(.*?)<')
|
||||
if ona != '':
|
||||
ona = ' - (' + ona + ')'
|
||||
|
||||
movie = ''
|
||||
movie = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="movie">(.*?)<')
|
||||
if movie != '':
|
||||
movie = ' - (' + movie + ')'
|
||||
|
||||
special = ''
|
||||
special = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="special">(.*?)<')
|
||||
if special != '':
|
||||
special = ' - (' + special + ')'
|
||||
|
||||
|
||||
# Concatena le informazioni
|
||||
|
||||
lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else ''
|
||||
|
||||
info = ep + lang + year + ova + ona + movie + special
|
||||
|
||||
# Crea il title da visualizzare
|
||||
long_title = '[B]' + title + '[/B]' + info + original
|
||||
|
||||
# Controlla se sono Episodi o Film
|
||||
if movie == '':
|
||||
contentType = 'tvshow'
|
||||
action = 'episodios'
|
||||
else:
|
||||
contentType = 'movie'
|
||||
action = 'findvideos'
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
contentType=contentType,
|
||||
action=action,
|
||||
title=long_title,
|
||||
url=scrapedurl,
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
thumbnail=scrapedthumb,
|
||||
context = autoplay.context))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
autorenumber.renumber(itemlist)
|
||||
|
||||
# Next page
|
||||
support.nextPage(itemlist, item, data, r'<a\sclass="page-link"\shref="([^"]+)"\srel="next"\saria-label="Successiva')
|
||||
return itemlist
|
||||
# Controlla la lingua se assente
|
||||
patronNext=r'<a href="([^"]+)" class="[^"]+" id="go-next'
|
||||
typeContentDict={'movie':['movie', 'special']}
|
||||
typeActionDict={'findvideos':['movie', 'special']}
|
||||
def itemHook(item):
|
||||
if not item.contentLanguage:
|
||||
if 'dub=1' in item.url or item.l == 'dub':
|
||||
item.contentLanguage = 'ITA'
|
||||
item.title += support.typo(item.contentLanguage,'_ [] color kod')
|
||||
else:
|
||||
item.contentLanguage = 'Sub-ITA'
|
||||
item.title += support.typo(item.contentLanguage,'_ [] color kod')
|
||||
return item
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
log()
|
||||
itemlist = []
|
||||
|
||||
patron_block = r'<div class="widget servers".*?>(.*?)<div id="download"'
|
||||
patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
|
||||
matches = support.match(item, patron, patron_block)[0]
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
title='[B] Episodio ' + scrapedtitle + '[/B]',
|
||||
url=urlparse.urljoin(host, scrapedurl),
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
plot=item.plot,
|
||||
fanart=item.thumbnail,
|
||||
thumbnail=item.thumbnail))
|
||||
|
||||
autorenumber.renumber(itemlist, item,'bold')
|
||||
support.videolibrary(itemlist, item)
|
||||
return itemlist
|
||||
data = get_data(item)
|
||||
anime = True
|
||||
pagination = 50
|
||||
patronBlock= r'<div class="server\s*active\s*"(?P<block>.*?)(?:<div class="server|<link)'
|
||||
patron = r'<li[^>]*>\s*<a.*?href="(?P<url>[^"]+)"[^>]*>(?P<episode>[^-<]+)(?:-(?P<episode2>[^<]+))?'
|
||||
def itemHook(item):
|
||||
item.number = support.re.sub(r'\[[^\]]+\]', '', item.title)
|
||||
item.title += support.typo(item.fulltitle,'-- bold')
|
||||
return item
|
||||
action='findvideos'
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
log()
|
||||
import time
|
||||
support.info(item)
|
||||
itemlist = []
|
||||
|
||||
matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
|
||||
videoData = ''
|
||||
|
||||
for serverid, servername in matches:
|
||||
block = scrapertoolsV2.find_multiple_matches(data,'data-id="'+serverid+'">(.*?)<div class="server')
|
||||
id = scrapertoolsV2.find_single_match(str(block),r'<a data-id="([^"]+)" data-base="'+item.fulltitle+'"')
|
||||
dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
|
||||
json = jsontools.load(dataJson)
|
||||
log('JSON= ',json)
|
||||
urls = []
|
||||
# resp = support.match(get_data(item), headers=headers, patron=r'data-name="(\d+)">([^<]+)<')
|
||||
resp = support.match(get_data(item), headers=headers, patron=r'data-name="(\d+)">([^<]+)<')
|
||||
data = resp.data
|
||||
|
||||
videoData +='\n'+json['grabber']
|
||||
|
||||
if serverid == '28':
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="play",
|
||||
title='diretto',
|
||||
quality='',
|
||||
url=json['grabber'],
|
||||
server='directo',
|
||||
show=item.show,
|
||||
contentType=item.contentType,
|
||||
folder=False))
|
||||
|
||||
return support.server(item, videoData, itemlist)
|
||||
for ID, name in resp.matches:
|
||||
if not item.number: item.number = support.match(item.title, patron=r'(\d+) -').match
|
||||
match = support.match(data, patronBlock=r'data-name="' + ID + r'"[^>]+>(.*?)(?:<div class="(?:server|download)|link)', patron=r'data-id="([^"]+)" data-episode-num="' + (item.number if item.number else '1') + '"' + r'.*?href="([^"]+)"').match
|
||||
if match:
|
||||
epID, epurl = match
|
||||
# if 'vvvvid' in name.lower():
|
||||
# urls.append(support.match(host + '/api/episode/ugly/serverPlayerAnimeWorld?id=' + epID, headers=headers, patron=r'<a.*?href="([^"]+)"', debug=True).match)
|
||||
if 'animeworld' in name.lower():
|
||||
url = support.match(data, patron=r'href="([^"]+)"\s*id="alternativeDownloadLink"', headers=headers).match
|
||||
title = support.match(url, patron=r'http[s]?://(?:www.)?([^.]+)', string=True).match
|
||||
itemlist.append(item.clone(action="play", title=title, url=url, server='directo'))
|
||||
else:
|
||||
dataJson = support.match(host + '/api/episode/info?id=' + epID + '&alt=0', headers=headers).data
|
||||
json = jsontools.load(dataJson)
|
||||
|
||||
title = support.match(json['grabber'], patron=r'server\d+.([^.]+)', string=True).match
|
||||
if title: itemlist.append(item.clone(action="play", title=title, url=json['grabber'].split('=')[-1], server='directo'))
|
||||
else: urls.append(json['grabber'])
|
||||
# support.info(urls)
|
||||
return support.server(item, urls, itemlist)
|
||||
|
||||
37
channels/aniplay.json
Executable file
37
channels/aniplay.json
Executable file
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"id": "aniplay",
|
||||
"name": "AniPlay",
|
||||
"active": false,
|
||||
"language": ["ita", "sub-ita"],
|
||||
"thumbnail": "aniplay.png",
|
||||
"banner": "aniplay.png",
|
||||
"categories": ["anime", "vos"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "sort",
|
||||
"type": "list",
|
||||
"label": "Ordine di Visualizzazione",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [ "Popolarità", "Titolo", "Numero Episodi", "Data di inizio", "Data di fine", "Data di aggiunta"]
|
||||
},
|
||||
{
|
||||
"id": "order",
|
||||
"type": "bool",
|
||||
"label": "Visualizza in ordine Discendente?",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perpage",
|
||||
"type": "list",
|
||||
"label": "Numero di elementi per pagina",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["10", "20", "30", "40", "50", "60", "80", "90"]
|
||||
}
|
||||
]
|
||||
}
|
||||
331
channels/aniplay.py
Executable file
331
channels/aniplay.py
Executable file
@@ -0,0 +1,331 @@
|
||||
# Channel bootstrap: resolve the site URL and the user-configurable
# listing options once at import time.
from platformcode import config, logger, autorenumber
from core import httptools, scrapertools, support, tmdb, jsontools
from inspect import stack

import sys
# Python 3 ships concurrent.futures; the Python 2 build bundles a backport.
if sys.version_info[0] >= 3:
    from concurrent import futures
else:
    from concurrent_py2 import futures

# Base URL of the AniPlay site/API (configured per channel).
host = config.get_channel_url()
# API sort field: the 'sort' setting is an index into this list.
sort = ['views', 'title', 'episodeNumber', 'startDate', 'endDate', 'createdDate'][config.get_setting('sort', 'aniplay')]
# 'order' is a boolean setting: True -> ascending, False -> descending.
order = 'asc' if config.get_setting('order', 'aniplay') else 'desc'
# Page size; NOTE(review): must stay aligned with the 'perpage' lvalues
# declared in aniplay.json — verify the two lists match index-for-index.
perpage = [10, 20, 30 ,40, 50, 60, 70, 80, 90][config.get_setting('perpage', 'aniplay')]
|
||||
|
||||
|
||||
@support.menu
def mainlist(item):
    """Channel main menu.

    The @support.menu decorator consumes the returned locals(): the
    'anime' list holds the root URL followed by sub-entries as
    (label, [url, action, args]) tuples.
    """
    anime=['/api/anime/advanced-search',
           ('A-Z', ['/api/anime/advanced-search', 'submenu_az', '']),
           ('Anno', ['', 'submenu_year', '']),
           ('Top', ['', 'submenu_top', '']),
           ('Ultimi aggiunti', ['', 'latest_added', ''])]
    return locals()
|
||||
|
||||
|
||||
def submenu_az(item):
    """Alphabetical index menu: '0-9' plus one entry per letter A-Z.

    Each entry points the find-by-char API endpoint at peliculas() with
    the chosen character passed through item.variable.
    """
    letters = ['0-9']
    letters.extend('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    entries = []
    for letter in letters:
        entry = item.clone(
            title=support.typo(letter, 'bold'),
            url=host + '/api/anime/find-by-char',
            action='peliculas',
            variable='&character=' + letter,
            thumbnail=support.thumb('az'))
        entries.append(entry)
    return entries
|
||||
|
||||
|
||||
def submenu_year(item):
    """Year index, newest first.

    The earliest year is derived by asking the API for the single oldest
    anime (sorted by startDate ascending) and reading its start year;
    minus one so that range() still includes the oldest year itself.
    """
    itemlist = []
    from datetime import date
    current = date.today().year
    first = int(httptools.downloadpage('{}/api/anime/advanced-search?page=0&size=1&sort=startDate,asc&sort=id'.format(host)).json[0]['startDate'].split('-')[0]) -1
    for year in range(current, first, -1):
        itemlist.append(item.clone(title = support.typo(year, 'bold'),
                        action= 'submenu_season',
                        variable= year,
                        thumbnail=support.thumb('year')))
    return itemlist
|
||||
|
||||
|
||||
def submenu_top(item):
    """Menu with the daily / weekly / monthly top-anime charts."""
    chart_slugs = {'Top del giorno':'daily-top', 'Top della settimana':'weekly-top', 'Top del mese':'monthly-top'}
    menu = []
    for label in chart_slugs:
        menu.append(item.clone(title=support.typo(label, 'bold'),
                               action='submenu_top_of',
                               variable=chart_slugs[label]))
    return menu
|
||||
|
||||
|
||||
def submenu_season(item):
    """List the seasons available for the year stored in item.variable,
    with labels localized to Italian."""
    itemlist = []
    seasons = {'winter':'Inverno', 'spring':'Primavera', 'summer':'Estate', 'fall':'Autunno'}
    url= '{}/api/seasonal-view?page=0&size=36&years={}'.format(host, item.variable)
    js = httptools.downloadpage(url).json[0]['seasonalAnime']
    for season in js:
        # The API reports the season as a dotted enum name; keep the last part.
        s = season['season'].split('.')[-1]
        title = seasons[s]
        itemlist.append(item.clone(title=title,
                        url = '{}/api/seasonal-view/{}-{}'.format(host, s, item.variable),
                        thumbnail = support.thumb(s),
                        action = 'peliculas',
                        variable=''))
    return itemlist
|
||||
|
||||
|
||||
def submenu_top_of(item):
    """Render one of the top charts (daily/weekly/monthly per item.variable)."""
    itemlist = []
    url= '{}/api/home/{}'.format(host, item.variable)
    js = httptools.downloadpage(url).json
    for anime in js:
        fulltitle = anime['animeTitle']
        # Titles come as 'Name (LANG)'; no parenthesized tag means Sub-ITA.
        # NOTE(review): this duplicates get_lang() below, except it strips
        # whitespace — consider unifying.
        title = fulltitle.split('(')[0].strip()
        scrapedlang = scrapertools.find_single_match(fulltitle, r'\(([^\)]+)')
        lang = scrapedlang.upper() if scrapedlang else 'Sub-ITA'
        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')

        itemlist.append(item.clone(title=long_title,
                        url = '{}/anime/{}'.format(host, anime['animeId']),
                        video_url = '{}/api/anime/{}'.format(host, anime['animeId']),
                        thumbnail = get_thumbnail(anime, 'animeHorizontalImages'),
                        action = 'episodios',
                        variable=anime['animeId']))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Channel search entry point.

    Routes the query through the advanced-search endpoint via peliculas().
    Errors are logged and swallowed so that one broken channel never
    interrupts the global search.
    """
    support.info(texto)
    item.url = host + '/api/anime/advanced-search'
    item.variable = '&query=' + texto

    try:
        return peliculas(item)
    # Keep the global search alive on any failure; 'sys' is already
    # imported at module level, so no local re-import is needed.
    except Exception:
        for line in sys.exc_info():
            support.logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def newest(categoria):
    """Entry point for the aggregated 'news' view.

    Returns the latest episodes when *categoria* is 'anime'; otherwise an
    empty list.  (The original fell through and returned None for other
    categories, which callers iterating the result cannot handle.)
    Errors are logged and swallowed so a broken channel never interrupts
    the global news scan.
    """
    support.info(categoria)
    item = support.Item()
    try:
        if categoria == "anime":
            return latest_added(item)
    # 'sys' is already imported at module level; no local re-import needed.
    except Exception:
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
    return []
|
||||
|
||||
|
||||
def latest_added(item):
    """Latest released episodes (home-page feed), paged.

    Also serves the global 'newest' view: pagination is suppressed when
    the caller (detected via the call stack) is newest().
    """
    itemlist = []
    page = item.page if item.page else 0
    url= '{}/api/home/latest-episodes?page={}'.format(host, page)
    js = httptools.downloadpage(url).json

    for episode in js:
        title = episode['title'] if episode['title'] else ''
        animeTitle, lang = get_lang(episode['animeTitle'])
        quality = 'Full HD' if episode['fullHd'] else 'HD'
        # episodeNumber arrives as a string such as '12.0'; normalize to int.
        long_title = support.typo('{}. {}{}'.format(int(float(episode['episodeNumber'])), title + ' - ' if title else '', animeTitle), 'bold') + support.typo(lang, '_ [] color kod') + support.typo(quality, '_ [] color kod')
        image = get_thumbnail(episode, 'episodeImages')

        itemlist.append(item.clone(title=long_title,
                        fulltitle=title,
                        url='{}/play/{}'.format(host, episode['id']),
                        contentType = 'episode',
                        contentTitle = title,
                        contentSerieName = animeTitle,
                        contentLanguage = lang,
                        quality = quality,
                        contentEpisodeNumber = int(float(episode['episodeNumber'])),
                        video_url = '{}/api/episode/{}'.format(host, episode['id']),
                        thumbnail = image,
                        fanart = image,
                        action = 'findvideos'))

    # No 'next page' entry when feeding the aggregated news view.
    if stack()[1][3] not in ['newest']:
        support.nextPage(itemlist, item.clone(page = page + 1))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """Paged anime listing for a search / advanced-search endpoint.

    item.url holds the API endpoint and item.variable any extra query
    parameters; sort field, direction and page size come from the
    channel settings.  Appends a 'next page' entry when a full page was
    returned (so there may be more results).
    """
    logger.debug()

    itemlist = []
    page = item.page if item.page else 0
    js = httptools.downloadpage('{}?page={}&size={}{}&sort={},{}&sort=id'.format(item.url, page, perpage, item.variable, sort, order)).json
    # Dump the payload once for debugging.  The original dumped it inside
    # the loop, re-serializing the whole response for every single entry.
    logger.debug(jsontools.dump(js))

    for it in js:
        title, lang = get_lang(it['title'])

        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')

        itemlist.append(item.clone(title = long_title,
                        fulltitle = title,
                        show = title,
                        contentLanguage = lang,
                        contentType = 'movie' if it['type'] == 'Movie' else 'tvshow',
                        contentTitle = title,
                        contentSerieName = title if it['type'] == 'Serie' else '',
                        action ='findvideos' if it['type'] == 'Movie' else 'episodios',
                        plot = it['storyline'],
                        url = '{}/anime/{}'.format(host, it['id']),
                        video_url = '{}/api/anime/{}'.format(host, it.get('animeId', it.get('id'))),
                        thumbnail = get_thumbnail(it),
                        fanart = get_thumbnail(it, 'horizontalImages')))

    autorenumber.start(itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # A full page implies more results may exist.
    if len(itemlist) == perpage:
        support.nextPage(itemlist, item.clone(page = page + 1))
    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """Episode / season browser for one anime.

    The API may answer with a flat episode list, an object carrying
    'seasons', or an object carrying 'episodes'; each shape is handled
    below.  Behavior also changes depending on the caller, detected via
    the call stack (videolibrary/autorenumber vs interactive menu).
    """
    logger.debug()
    itemlist = []
    if not item.video_url:
        # Derive the API endpoint from the public page URL.
        item.video_url = item.url.replace('/anime/', '/api/anime/')
    # url = '{}/api/anime/{}'.format(host, item.id)
    json = httptools.downloadpage(item.video_url, CF=False ).json

    if type(json) == list:
        item.show_renumber = False
        itemlist = list_episodes(item, json)

    elif json.get('seasons'):
        seasons = json['seasons']
        seasons.sort(key=lambda s: s['episodeStart'])

        for it in seasons:
            title = it['name']

            itemlist.append(item.clone(title = title,
                            video_url = '{}/api/anime/{}/season/{}'.format(host, it['animeId'], it['id']),
                            contentType = 'season',
                            action = 'list_episodes',
                            plot = json['storyline'],
                            year = it['yearStart'],
                            show_renumber = True))

        # If the call comes from the videolibrary or autorenumber, show the
        # episodes directly instead of the season folders.
        if stack()[1][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
            itlist = []
            # Fetch every season's episode list in parallel.
            with futures.ThreadPoolExecutor() as executor:
                eplist = []
                for ep in itemlist:
                    ep.show_renumber = False
                    eplist.append(executor.submit(list_episodes, ep))
                for res in futures.as_completed(eplist):
                    if res.result():
                        itlist.extend(res.result())
            itemlist = itlist
    elif json.get('episodes'):
        itemlist = list_episodes(item, json)

    # add renumber option
    if stack()[1][3] not in ['find_episodes'] and itemlist and itemlist[0].contentType == 'episode':
        autorenumber.start(itemlist, item)

    # add "add to videolibrary" menu entry
    if stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
        support.videolibrary(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
def list_episodes(item, json=None):
    """Turn an API episode payload into playable episode items.

    json -- optional pre-fetched payload; downloaded from item.video_url
            when omitted (used by the threaded path in episodios()).
    """
    itemlist = []

    if not json:
        json = httptools.downloadpage(item.video_url, CF=False ).json

    episodes = json['episodes'] if 'episodes' in json else json
    # episodeNumber is a string such as '12.0'; sort numerically.
    episodes.sort(key=lambda ep: int(ep['episodeNumber'].split('.')[0]))

    for it in episodes:
        quality = 'Full HD' if it['fullHd'] else 'HD'

        # Prefix the season when browsing inside one ('2x05' vs plain '05').
        if item.contentSeason:
            episode = '{}x{:02d}'.format(item.contentSeason, int(it['episodeNumber'].split('.')[0]))
        else:
            episode = '{:02d}'.format(int(it['episodeNumber'].split('.')[0]))

        title = support.typo('{}. {}'.format(episode, it['title']), 'bold')
        image = get_thumbnail(it, 'episodeImages')

        itemlist.append(item.clone(title = title,
                        url= '{}/play/{}'.format(host, it['id']),
                        video_url= '{}/api/episode/{}'.format(host, it['id']),
                        contentType = 'episode',
                        contentEpisodeNumber = int(it['episodeNumber'].split('.')[0]),
                        contentSeason = item.contentSeason if item.contentSeason else '',
                        action = 'findvideos',
                        quality = quality,
                        thumbnail = image,
                        fanart= image))

    # Renumber episodes only if shown in the menu
    if item.show_renumber:
        autorenumber.start(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the direct video URL for an episode (or a movie entry)."""
    logger.debug()

    res = httptools.downloadpage(item.video_url, CF=False ).json

    # Movie entries point at the anime object; fetch its first episode.
    if res.get('episodes', []):
        res = httptools.downloadpage('{}/api/episode/{}'.format(host, res['episodes'][0]['id'])).json

    item.url = res['videoUrl']
    item.server = 'directo'

    # HLS manifests need the proper player hint.
    if '.m3u' in item.url:
        item.manifest = 'hls'

    return support.server(item, itemlist=[item])
|
||||
|
||||
|
||||
def get_thumbnail(data, prop = 'verticalImages', key = 'full'):
|
||||
"""
|
||||
" Returns the vertical image as per given key and prop
|
||||
" possibile key values are:
|
||||
" - small
|
||||
" - full
|
||||
" - blurred
|
||||
" - medium
|
||||
" possibile prop values are:
|
||||
" - verticalImages
|
||||
" - animeHorizontalImages
|
||||
" - animeVerticalImages
|
||||
" - horizontalImages
|
||||
" - episodeImages
|
||||
"""
|
||||
value = None
|
||||
verticalImages = data.get(prop, [])
|
||||
if verticalImages:
|
||||
first = verticalImages[0]
|
||||
if first:
|
||||
value = first.get('image' + key.capitalize(), '')
|
||||
return value
|
||||
|
||||
|
||||
def get_lang(value):
    """Split an API title like 'Name (ITA)' into (clean title, language tag).

    value -- raw title string; may be empty or None.

    Returns a (title, lang) tuple.  The language is the upper-cased text
    of the first parenthesized tag, defaulting to 'Sub-ITA' when no tag
    is present.  Fixes vs the original: the title is stripped of the
    trailing whitespace left by split('(') (consistent with
    submenu_top_of), and a None value no longer raises.
    """
    if not value:
        return '', 'Sub-ITA'
    title = value.split('(')[0].strip()
    # Text between the first '(' and the following ')' (or end of string),
    # equivalent to the previous regex r'\(([^\)]+)' without needing re.
    scrapedlang = value.partition('(')[2].partition(')')[0]
    lang = scrapedlang.upper() if scrapedlang else 'Sub-ITA'
    return title, lang
|
||||
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"id": "bleachportal",
|
||||
"name": "BleachPortal",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"fanart": "http://i39.tinypic.com/35ibvcx.jpg",
|
||||
"thumbnail": "http://www.bleachportal.it/images/index_r1_c1.jpg",
|
||||
"banner": "http://cgi.di.uoa.gr/~std05181/images/bleach.jpg",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,116 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Ringraziamo Icarus crew
|
||||
# ------------------------------------------------------------
|
||||
# XBMC Plugin
|
||||
# Canale per http://bleachportal.it
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from core import scrapertools, httptools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = "http://www.bleachportal.it"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: episode lists for the two series hosted by the site."""
    logger.info("[BleachPortal.py]==> mainlist")
    itemlist = [Item(channel=item.channel,
                     action="episodi",
                     title="[COLOR azure] Bleach [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
                     url=host + "/streaming/bleach/stream_bleach.htm",
                     thumbnail="http://i45.tinypic.com/286xp3m.jpg",
                     fanart="http://i40.tinypic.com/5jsinb.jpg",
                     extra="bleach"),
                Item(channel=item.channel,
                     action="episodi",
                     title="[COLOR azure] D.Gray Man [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
                     url=host + "/streaming/d.gray-man/stream_dgray-man.htm",
                     thumbnail="http://i59.tinypic.com/9is3tf.jpg",
                     fanart="http://wallpapercraft.net/wp-content/uploads/2016/11/Cool-D-Gray-Man-Background.jpg",
                     extra="dgrayman")]

    return itemlist
|
||||
|
||||
|
||||
def episodi(item):
    """Scrape the episode table of a series page (item.extra selects which)."""
    logger.info("[BleachPortal.py]==> episodi")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # Captures: episode number, episode title, relative episode-page URL.
    patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">\D+([\d\-]+)\s?<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    animetitle = "Bleach" if item.extra == "bleach" else "D.Gray Man"
    for scrapednumber, scrapedtitle, scrapedurl in matches:
        # Python 2 only: re-encode the site's latin1 text as UTF-8.
        scrapedtitle = scrapedtitle.decode('latin1').encode('utf8')
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR azure]%s Ep: [COLOR deepskyblue]%s[/COLOR][/COLOR]" % (animetitle, scrapednumber),
                 url=item.url.replace("stream_bleach.htm",scrapedurl) if "stream_bleach.htm" in item.url else item.url.replace("stream_dgray-man.htm", scrapedurl),
                 plot=scrapedtitle,
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart,
                 fulltitle="[COLOR red]%s Ep: %s[/COLOR] | [COLOR deepskyblue]%s[/COLOR]" % (animetitle, scrapednumber, scrapedtitle)))

    # Bleach additionally has an OAV / movies page.
    if item.extra == "bleach":
        itemlist.append(
            Item(channel=item.channel,
                 action="oav",
                 title="[B][COLOR azure] OAV e Movies [/COLOR][/B]",
                 url=item.url.replace("stream_bleach.htm", "stream_bleach_movie_oav.htm"),
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    # Reverse the scraped order before displaying.
    return list(reversed(itemlist))
|
||||
|
||||
|
||||
def oav(item):
    """Scrape the Bleach OAV / movies page into playable entries."""
    logger.info("[BleachPortal.py]==> oav")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # Captures: entry name, title/description, relative page URL.
    patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">-\s+(.*?)<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapednumber, scrapedtitle, scrapedurl in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR deepskyblue] " + scrapednumber + " [/COLOR]",
                 url=item.url.replace("stream_bleach_movie_oav.htm", scrapedurl),
                 plot=scrapedtitle,
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fulltitle="[COLOR red]" + scrapednumber + "[/COLOR] | [COLOR deepskyblue]" + scrapedtitle + "[/COLOR]"))

    # Reverse the scraped order before displaying.
    return list(reversed(itemlist))
|
||||
|
||||
|
||||
def findvideos(item):
    """Extract the direct video file URL from an episode page."""
    logger.info("[BleachPortal.py]==> findvideos")
    itemlist = []

    # NOTE(review): this sub removes every '<word>//' occurrence, which
    # also strips the 'http://' scheme — looks over-broad; verify intent.
    if "bleach//" in item.url:
        item.url = re.sub(r'\w+//', "", item.url)

    data = httptools.downloadpage(item.url).data

    # The two series embed the player differently.
    if "bleach" in item.extra:
        video = scrapertools.find_single_match(data, 'file: "(.*?)",')
    else:
        video = scrapertools.find_single_match(data, 'file=(.*?)&').rsplit('/', 1)[-1]

    itemlist.append(
        Item(channel=item.channel,
             action="play",
             title="[[COLOR orange]Diretto[/COLOR]] [B]%s[/B]" % item.title,
             url=item.url.replace(item.url.split("/")[-1], "/" + video),
             thumbnail=item.thumbnail,
             fulltitle=item.fulltitle))
    return itemlist
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "bravoporn",
|
||||
"name": "bravoporn",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.bravoporn.com/v/images/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'http://www.bravoporn.com'
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: newest, popular, categories, and free-text search."""
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host +"/latest-updates/"))
    itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/c/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Channel search entry point; the query goes in the 'q' parameter."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/s/?q=%s" % texto
    try:
        return lista(item)
    # Swallow errors so a failing channel doesn't break the global search.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List the site's categories with their video counts."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the bare space alternative below was probably '&nbsp;'
    # originally and got mangled in transit — verify against history.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # Captures: category URL, thumbnail, name, movie count.
    patron = '<a href="([^"]+)" class="th">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        # Thumbnails are protocol-relative on the site.
        scrapedthumbnail = "http:" + scrapedthumbnail
        scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist
|
||||
|
||||
|
||||
def lista(item):
    """Scrape one page of video entries plus a 'next page' link."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the bare space alternative below was probably '&nbsp;'
    # originally and got mangled in transit — verify against history.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # Captures: video URL, thumbnail, title, duration.
    patron = '<div class=".*?video_block"><a href="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?alt="([^"]+)".*?'
    patron += '<span class="time">([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        thumbnail = "https:" + scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
    next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Extract the HQ mp4 source URL(s) from the video page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl))
    return itemlist
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "camwhoresbay",
|
||||
"name": "camwhoresbay",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'https://www.camwhoresbay.com'
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: newest, top rated, most viewed, categories, search."""
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/"))
    itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Channel search; the raw query is kept in item.extra for pagination."""
    logger.info()
    item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
    item.extra = texto
    try:
        return lista(item)
    # Swallow the exception so a failing channel doesn't break global search.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """List categories alphabetically with their video counts."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: category URL, name, thumbnail, video count.
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="thumb" src="([^"]+)".*?'
    patron += '<div class="videos">([^"]+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedplot = ""
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return sorted(itemlist, key=lambda i: i.title)
|
||||
|
||||
|
||||
def lista(item):
    """Scrape one page of videos; pagination differs between search and
    browse views (AJAX 'from'/'from_videos' offsets vs a plain next link)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: video URL, title, lazy-loaded thumbnail, duration.
    patron = '<div class="video-item ">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
    patron += 'data-original="([^"]+)".*?'
    patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                              contentTitle = scrapedtitle, fanart=thumbnail))
    # Search results (item.extra set) paginate through an AJAX block with a
    # numeric 'from_videos' offset.
    if item.extra:
        next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
        if next_page:
            if "from_videos=" in item.url:
                next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
            else:
                next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
                            "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
            itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    else:
        # Browse views: prefer a plain 'next' link, else the AJAX 'from' offset.
        next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
        if next_page and not next_page.startswith("#"):
            next_page = urlparse.urljoin(item.url,next_page)
            itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
        else:
            next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
            if next_page:
                if "from" in item.url:
                    next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
                else:
                    next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
                        item.url, next_page)
                itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Pick the best available stream from the embedded player config.

    Tries alt_url3 -> alt_url2 -> alt_url -> video_url in order
    (presumably highest quality first — verify against the player JS).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'')
    if scrapedurl == "" :
        scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'')
    if scrapedurl == "" :
        scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'')
    if scrapedurl == "" :
        scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'')

    itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
                         thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
    return itemlist
|
||||
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"id": "canalporno",
|
||||
"name": "Canalporno",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
|
||||
"banner": "canalporno.png",
|
||||
"categories": [
|
||||
"adult"
|
||||
]
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = "http://www.canalporno.com"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: latest videos, category list, and free-text search."""
    logger.info()
    itemlist = []
    itemlist.append(item.clone(action="findvideos", title="Útimos videos", url=host))
    itemlist.append(item.clone(action="categorias", title="Listado Categorias",
                               url=host + "/categorias"))
    # The '%s' placeholder is filled with the query by search().
    itemlist.append(item.clone(action="search", title="Buscar", url=host + "/search/?q=%s"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Channel search: fill the URL template and list results sorted by title."""
    logger.info()

    try:
        item.url = item.url % texto
        itemlist = findvideos(item)
        return sorted(itemlist, key=lambda it: it.title)
    # Swallow errors so a failing channel doesn't break the global search.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def findvideos(item):
    """Scrape video entries from a listing page, plus pagination links."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Captures: thumbnail, title, relative URL, duration in minutes.
    patron = '<img src="([^"]+)".*?alt="([^"]+)".*?<h2><a href="([^"]+)">.*?' \
             '<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for thumbnail, title, url, time in matches:
        scrapedtitle = time + " - " + title
        scrapedurl = host + url
        scrapedthumbnail = thumbnail
        itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumbnail))

    # Pages after the currently selected one in the pagination bar.
    patron = '<div class="paginacion">.*?<span class="selected">.*?<a href="([^"]+)">([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, title in matches:
        url = host + url
        title = "Página %s" % title
        itemlist.append(item.clone(action="findvideos", title=title, url=url))

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List the category links found in the sort-by-category block."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<ul class="ordenar-por ordenar-por-categoria">'
                                                  '(.*?)<\/ul>')

    #patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>'
    patron = "<li><a href='([^']+)'\s?title='([^']+)'>.*?<\/a><\/li>"
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title in matches:
        url = host + url
        #thumbnail = "http:" + thumbnail
        itemlist.append(item.clone(action="findvideos", title=title, url=url))

    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Extract the direct <source> URL from the video page."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
    itemlist.append(item.clone(url=url, server="directo"))

    return itemlist
|
||||
69
channels/casacinema.json
Normal file → Executable file
69
channels/casacinema.json
Normal file → Executable file
@@ -1,70 +1,11 @@
|
||||
{
|
||||
"id": "casacinema",
|
||||
"name": "Casacinema",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
|
||||
"categories": ["tvshow", "movie"],
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": false,
|
||||
"thumbnail": "casacinema.png",
|
||||
"banner": "casacinema.png",
|
||||
"categories": ["tvshow", "movie","vos"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
407
channels/casacinema.py
Normal file → Executable file
407
channels/casacinema.py
Normal file → Executable file
@@ -1,340 +1,145 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per casacinema
|
||||
# Canale per 'casacinema'
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channelselector import thumb, get_thumb
|
||||
from core import scrapertools, scrapertoolsV2, httptools, tmdb, support
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
|
||||
__channel__ = "casacinema"
|
||||
host = config.get_channel_url(__channel__)
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['verystream', 'openload', 'wstream', 'speedvideo']
|
||||
list_quality = ['HD', 'SD']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'casacinema')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'casacinema')
|
||||
|
||||
headers = [['Referer', '%s/genere/serie-tv' % host]]
|
||||
|
||||
|
||||
from core import support
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
logger.info("kod.casacinema mainlist")
|
||||
film = ['/category/film',
|
||||
('Generi', ['', 'genres', 'genres']),
|
||||
]
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
tvshow = ['/category/serie-tv',
|
||||
('Novità', ['/aggiornamenti-serie-tv', 'peliculas', '']),
|
||||
]
|
||||
|
||||
itemlist = [Item(channel=item.channel,
|
||||
title="[B]Film[/B]",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/genere/film" % host),
|
||||
Item(channel=item.channel,
|
||||
title="[B]Film - HD[/B]",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/?s=[HD]" % host),
|
||||
Item(channel=item.channel,
|
||||
title="[B] > Categorie[/B]",
|
||||
action="categorias",
|
||||
extra="movie",
|
||||
url="%s/genere/film" % host),
|
||||
Item(channel=item.channel,
|
||||
title="[B]Film Sub - Ita[/B]",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/genere/sub-ita" % host),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR blue]Cerca Film...[/COLOR]",
|
||||
action="search",
|
||||
extra="movie",),
|
||||
Item(channel=item.channel,
|
||||
title="[B]Serie TV[/B]",
|
||||
extra="tvshow",
|
||||
action="peliculas_tv",
|
||||
url="%s/genere/serie-tv" % host),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR blue]Cerca Serie TV...[/COLOR]",
|
||||
action="search",
|
||||
extra="tvshow")]
|
||||
search = ''
|
||||
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
return locals()
|
||||
|
||||
# auto thumb
|
||||
itemlist=thumb(itemlist)
|
||||
|
||||
return itemlist
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
action = 'peliculas'
|
||||
blacklist = ['PRIME VISIONI', 'ULTIME SERIE TV', 'ULTIMI FILM']
|
||||
patronMenu = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<>]+)</a></li>'
|
||||
patronBlock = r'<div class="container home-cats">(?P<block>.*?)<div class="clear">'
|
||||
return locals()
|
||||
|
||||
|
||||
def check(item):
|
||||
item.data = support.match(item).data
|
||||
if 'episodi e stagioni' in item.data.lower():
|
||||
support.info('select = ### è una serie ###')
|
||||
item.contentType = 'tvshow'
|
||||
return episodios(item)
|
||||
else:
|
||||
support.info('select = ### è un film ###')
|
||||
item.contentType = 'movie'
|
||||
return findvideos(item)
|
||||
|
||||
|
||||
def search(item, text):
|
||||
support.info(text)
|
||||
text = text.replace(' ', '+')
|
||||
item.url = host + '/?a=b&s=' + text
|
||||
item.args = 'search'
|
||||
try:
|
||||
return peliculas(item)
|
||||
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.info('search log:', line)
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info("[casacinema.py] newest" + categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
item = support.Item()
|
||||
item.args = 'newest'
|
||||
|
||||
try:
|
||||
if categoria == "film":
|
||||
item.url = host + '/genere/film'
|
||||
item.extra = "movie"
|
||||
item.action = "peliculas"
|
||||
itemlist = peliculas(item)
|
||||
if categoria == 'series':
|
||||
item.contentType = 'tvshow'
|
||||
item.url = host+'/aggiornamenti-serie-tv'
|
||||
|
||||
if itemlist[-1].action == "peliculas":
|
||||
itemlist.pop()
|
||||
else:
|
||||
item.contentType = 'movie'
|
||||
item.url = host+'/category/film'
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
item.action = 'peliculas'
|
||||
itemlist = peliculas(item)
|
||||
|
||||
if itemlist[-1].action == 'peliculas':
|
||||
itemlist.pop()
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
support.info("%s" % line)
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[casacinema.py] " + item.url + " search " + texto)
|
||||
|
||||
item.url = host + "/?s=" + texto
|
||||
|
||||
try:
|
||||
if item.extra == "tvshow":
|
||||
return peliculas_tv(item)
|
||||
if item.extra == "movie":
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
logger.info("kod.casacinema peliculas")
|
||||
if item.contentType == 'movie':
|
||||
action = 'findvideos'
|
||||
elif item.contentType == 'tvshow':
|
||||
action = 'episodios'
|
||||
pagination = ''
|
||||
else:
|
||||
action = 'check'
|
||||
|
||||
itemlist = []
|
||||
if item.args == 'newest':
|
||||
patron = r'<li><a href="(?P<url>[^"]+)"[^=]+="(?P<thumb>[^"]+)"><div>\s*?<div[^>]+>(?P<title>[^\(\[<]+)(?:\[(?P<quality1>HD)\])?[ ]?(?:\(|\[)?(?P<lang>[sS]ub-[iI][tT][aA])?(?:\)|\])?[ ]?(?:\[(?P<quality>.+?)\])?[ ]?(?:\((?P<year>\d+)\))?<(?:[^>]+>.+?(?:title="Nuovi episodi">(?P<episode>\d+x\d+)[ ]?(?P<lang2>Sub-Ita)?|title="IMDb">(?P<rating>[^<]+)))?'
|
||||
else:
|
||||
patron = r'<li><a href="(?P<url>[^"]+)"[^=]+="(?P<thumb>[^"]+)"><div>\s*?<div[^>]+>(?P<title>[^\(\[<]+)(?P<title2>\([\D*]+\))?(?:\[(?P<quality1>HD)\])?\s?(?:[\(\[])?(?P<lang>[sS]ub-[iI][tT][aA])?(?:[\)\]])?\s?(?:\[(?P<quality>.+?)\])?\s?(?:\((?P<year>\d+)\))?(?:\(\D{2}\s\d{4}\))?<'
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.info('DATA=' +data)
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
cleantitle = re.sub(r'[-–]*\s*[Ii]l [Ff]ilm\s*[-–]*?', '', title).strip()
|
||||
cleantitle = cleantitle.replace('[HD]', '').strip()
|
||||
|
||||
year = scrapertools.find_single_match(title, r'\((\d{4})\)')
|
||||
infolabels = {}
|
||||
if year:
|
||||
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
|
||||
infolabels['year'] = year
|
||||
|
||||
scrapedplot = ""
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="movie",
|
||||
title=title,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
fulltitle=cleantitle,
|
||||
show=cleantitle,
|
||||
plot=scrapedplot,
|
||||
infoLabels=infolabels,
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
## Paginación
|
||||
next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)".*?>Pagina')
|
||||
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
|
||||
url=next_page,
|
||||
extra=item.extra,
|
||||
thumbnail=get_thumb('next.png')))
|
||||
|
||||
return itemlist
|
||||
patronNext = r'<a href="([^"]+)"\s*>Pagina'
|
||||
|
||||
|
||||
def peliculas_tv(item):
|
||||
logger.info("kod.casacinema peliculas")
|
||||
def itemHook(item):
|
||||
if item.quality1:
|
||||
item.quality = item.quality1
|
||||
item.title += support.typo(item.quality, '_ [] color kod')
|
||||
if item.lang2:
|
||||
item.contentLanguage = item.lang2
|
||||
item.title += support.typo(item.lang2, '_ [] color kod')
|
||||
if item.args == 'novita':
|
||||
item.title = item.title
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
cleantitle = re.sub(r'[-–]*\s*[Ss]erie [Tt]v\s*[-–]*?', '', title).strip()
|
||||
cleantitle = cleantitle.replace('[HD]', '').replace('[SD]', '').strip()
|
||||
|
||||
year = scrapertools.find_single_match(title, r'\((\d{4})\)')
|
||||
infolabels = {}
|
||||
if year:
|
||||
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
|
||||
infolabels['year'] = year
|
||||
|
||||
scrapedplot = ""
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodios",
|
||||
contentType="tvshow",
|
||||
title=title,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
fulltitle=cleantitle,
|
||||
show=cleantitle,
|
||||
plot=scrapedplot,
|
||||
infoLabels=infolabels,
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
## Paginación
|
||||
next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Pagina') ### <- Regex rimosso spazio - precedente <li><a href="([^"]+)" >Pagina
|
||||
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas_tv",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=next_page,
|
||||
extra=item.extra,
|
||||
thumbnail=get_thumb('next.png')))
|
||||
|
||||
return itemlist
|
||||
|
||||
def categorias(item):
|
||||
logger.info("kod.casacinema categorias")
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Narrow search by selecting only the combo
|
||||
bloque = scrapertools.find_single_match(data, 'Categorie(.*?)</ul>')
|
||||
|
||||
# The categories are the options for the combo
|
||||
patron = '<a href="(.*?)">(.*?)</a></li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
extra=item.extra,
|
||||
url=urlparse.urljoin(host, scrapedurl)))
|
||||
|
||||
return itemlist
|
||||
return item
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
def load_episodios(html, item, itemlist, lang_title):
|
||||
patron = '.*?<a href="[^"]+"[^o]+ofollow[^>]+>[^<]+</a><(?:b|/)[^>]+>'
|
||||
matches = re.compile(patron).findall(html)
|
||||
for data in matches:
|
||||
# Estrae i contenuti
|
||||
scrapedtitle = scrapertoolsV2.htmlclean(re.sub(r'(<a [^>]+>)*(<\/a>.*)*(Speedvideo)*', '', data)).strip()
|
||||
if scrapedtitle != 'Categorie':
|
||||
scrapedtitle = scrapedtitle.replace('×', 'x')
|
||||
scrapedtitle = scrapedtitle.replace('×', 'x')
|
||||
scrapedtitle = scrapedtitle.replace(';', '')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
|
||||
url=data,
|
||||
thumbnail=item.thumbnail,
|
||||
extra=item.extra,
|
||||
fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
|
||||
show=item.show))
|
||||
|
||||
logger.info("[casacinema.py] episodios")
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.decodeHtmlentities(data)
|
||||
data = scrapertools.find_single_match(data, '<p>(?:<strong>|)(.*?)<div id="disqus_thread">')
|
||||
|
||||
lang_titles = []
|
||||
starts = []
|
||||
patron = r"Stagione.*?(?:ITA|\d+)"
|
||||
matches = re.compile(patron, re.IGNORECASE).finditer(data)
|
||||
for match in matches:
|
||||
season_title = match.group()
|
||||
if season_title != '':
|
||||
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
|
||||
starts.append(match.end())
|
||||
|
||||
i = 1
|
||||
len_lang_titles = len(lang_titles)
|
||||
|
||||
while i <= len_lang_titles:
|
||||
inizio = starts[i - 1]
|
||||
fine = starts[i] if i < len_lang_titles else -1
|
||||
|
||||
html = data[inizio:fine]
|
||||
lang_title = lang_titles[i - 1]
|
||||
|
||||
load_episodios(html, item, itemlist, lang_title)
|
||||
|
||||
i += 1
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios" + "###" + item.extra,
|
||||
show=item.show))
|
||||
|
||||
return itemlist
|
||||
if item.data:
|
||||
data = item.data
|
||||
action = 'findvideos'
|
||||
item.contentType = 'tvshow'
|
||||
blacklist = ['']
|
||||
patron = r'"season-no">(?P<season>\d+)x(?P<episode>\d+)(?:[^>]+>){5}\s*(?P<title>[^<]+)(?P<data>.*?)</table>'
|
||||
patronBlock = r'<span>(?:.+?Stagione*.+?(?P<lang>[Ii][Tt][Aa]|[Ss][Uu][Bb][\-]?[iI][tT][aA]))?.*?</span>.*?class="content(?P<block>.*?)(?:"accordion-item|<script>)'
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("kod.casacinema findvideos")
|
||||
|
||||
data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
html = httptools.downloadpage(data).data
|
||||
patron = '"http:\/\/shrink-service\.it\/[^\/]+\/[^\/]+\/([^"]+)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(html)
|
||||
|
||||
for url in matches:
|
||||
if url is not None:
|
||||
data = data
|
||||
else:
|
||||
continue
|
||||
|
||||
return support.server(item, data=data)
|
||||
if item.contentType != 'movie':
|
||||
links = support.match(item.data, patron=r'href="([^"]+)"').matches
|
||||
else:
|
||||
matchData = item.data if item.data else support.match(item.url, headers=headers).data
|
||||
links = support.match(matchData, patron=r'data-id="([^"]+)"').matches
|
||||
|
||||
return support.server(item, links)
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
{
|
||||
"id": "casacinemaInfo",
|
||||
"name": "La casa del cinema",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "",
|
||||
"banner": "",
|
||||
"categories": ["movie"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,151 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per casacinema
|
||||
# ------------------------------------------------------------
|
||||
|
||||
from core import scrapertoolsV2, httptools, servertools, tmdb, support
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
|
||||
__channel__ = "casacinemainfo"
|
||||
host = config.get_channel_url(__channel__)
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['verystream', 'openload', 'wstream', 'speedvideo']
|
||||
list_quality = ['1080p', '720', '480p', '360p']
|
||||
|
||||
checklinks = config.get_setting('checklinks', 'casacinema')
|
||||
checklinks_number = config.get_setting('checklinks_number', 'casacinema')
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("alfa.casacinema mainlist")
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = [Item(channel=item.channel,
|
||||
title="Film",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url=host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="In sala",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/category/in-sala/" % host,
|
||||
thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"),
|
||||
Item(channel=item.channel,
|
||||
title="Novità",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/category/nuove-uscite/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="Sub - Ita",
|
||||
action="peliculas",
|
||||
extra="movie",
|
||||
url="%s/category/sub-ita/" % host,
|
||||
thumbnail="http://i.imgur.com/qUENzxl.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
action="search",
|
||||
extra="movie",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[casacinemaInfo.py] " + item.url + " search " + texto)
|
||||
|
||||
item.url = host + "/?s=" + texto
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
itemlist = []
|
||||
|
||||
patron = '<li class="col-md-12 itemlist">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?Film dell\\\'anno: ([0-9]{4}).*?<p class="text-list">([^<>]+)</p>'
|
||||
matches = scrapertoolsV2.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches:
|
||||
title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
|
||||
cleantitle = title.replace('[Sub-ITA]', '').strip()
|
||||
|
||||
infoLabels = {"plot": scrapertoolsV2.decodeHtmlentities(scrapedplot), "year": scrapedyear}
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="movie",
|
||||
title=title,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
infoLabels=infoLabels,
|
||||
fulltitle=cleantitle))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info("[casacinemaInfo.py] peliculas")
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Estrae i contenuti
|
||||
|
||||
patron = '<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)"'
|
||||
|
||||
matches = scrapertoolsV2.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
|
||||
cleantitle = title.replace('[Sub-ITA]', '').strip()
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="movie",
|
||||
title=title,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
fulltitle=cleantitle))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
## Paginación
|
||||
next_page = scrapertoolsV2.find_single_match(data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"') ### <- Regex rimosso spazio - precedente <li><a href="([^"]+)" >Pagina -> Continua. riga 221
|
||||
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=next_page,
|
||||
extra=item.extra,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("[casacinemaInfo.py] findvideos")
|
||||
|
||||
itemlist = support.hdpass_get_servers(item)
|
||||
|
||||
# Requerido para Filtrar enlaces
|
||||
|
||||
if checklinks:
|
||||
itemlist = servertools.check_list_links(itemlist, checklinks_number)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
return itemlist
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"id": "cat3plus",
|
||||
"name": "Cat3plus",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": [],
|
||||
"thumbnail": "https://i.imgur.com/SJxXKa2.png",
|
||||
"fanart": "https://i.imgur.com/ejCwTxT.jpg",
|
||||
"banner": "https://i.imgur.com/bXUyk6m.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"vo"
|
||||
]
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel SleazeMovies -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By Sculkurt -*-
|
||||
|
||||
|
||||
import re
|
||||
|
||||
from channelselector import get_thumb
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
host = 'http://www.cat3plus.com/'
|
||||
|
||||
headers = [
|
||||
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
|
||||
['Accept-Encoding', 'gzip, deflate'],
|
||||
['Referer', host]
|
||||
]
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = list()
|
||||
itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
|
||||
itemlist.append(item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)))
|
||||
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
|
||||
|
||||
return itemlist
|
||||
|
||||
def years(item):
|
||||
logger.info()
|
||||
itemlist = list()
|
||||
data = httptools.downloadpage(item.url, cookies=False).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron = "<a dir='ltr' href='([^']+)'>([^<]+)</a>"
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
|
||||
return itemlist
|
||||
|
||||
def get_source(url):
|
||||
logger.info()
|
||||
data = httptools.downloadpage(url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
return data
|
||||
|
||||
|
||||
def list_all(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = get_source(item.url)
|
||||
|
||||
patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
|
||||
patron += 'src="([^"]+).*?'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, year, img in matches:
|
||||
itemlist.append(Item(channel = item.channel,
|
||||
title = scrapedtitle,
|
||||
url = scrapedurl,
|
||||
action = "findvideos",
|
||||
thumbnail = img,
|
||||
contentTitle = scrapedtitle,
|
||||
contentType = "movie",
|
||||
infoLabels = {'year': year}))
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
|
||||
|
||||
# Extraer la marca de siguiente página
|
||||
next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'")
|
||||
if next_page != "":
|
||||
itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
if texto != "":
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search?q=" + texto
|
||||
item.extra = "busqueda"
|
||||
try:
|
||||
return list_all(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for url in matches:
|
||||
data = httptools.downloadpage(url, headers={'Referer': item.url}).data
|
||||
|
||||
itemlist.extend(servertools.find_video_items(data=data))
|
||||
|
||||
for video in itemlist:
|
||||
|
||||
video.channel = item.channel
|
||||
video.contentTitle = item.contentTitle
|
||||
video.title = video.server.capitalize()
|
||||
|
||||
# Opción "Añadir esta pelicula a la videoteca"
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
|
||||
itemlist.append(Item(channel = item.channel,
|
||||
title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
|
||||
url = item.url,
|
||||
action = "add_pelicula_to_library",
|
||||
extra = "findvideos",
|
||||
contentTitle = item.contentTitle,
|
||||
thumbnail = item.thumbnail
|
||||
))
|
||||
|
||||
return itemlist
|
||||
44
channels/cb01anime.json
Normal file → Executable file
44
channels/cb01anime.json
Normal file → Executable file
@@ -1,36 +1,10 @@
|
||||
{
|
||||
"id": "cb01anime",
|
||||
"name": "Cb01anime",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://i.imgur.com/bHoUMo2.png",
|
||||
"banner": "http://i.imgur.com/bHoUMo2.png",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in Ricerca Globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
"id": "cb01anime",
|
||||
"name": "Cb01anime",
|
||||
"language": ["ita", "vos", "sub-ita"],
|
||||
"active": false,
|
||||
"thumbnail": "cb01anime.png",
|
||||
"banner": "cb01anime.png",
|
||||
"categories": ["anime"],
|
||||
"settings": []
|
||||
}
|
||||
361
channels/cb01anime.py
Normal file → Executable file
361
channels/cb01anime.py
Normal file → Executable file
@@ -1,276 +1,139 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Ringraziamo Icarus crew
|
||||
# ------------------------------------------------------------
|
||||
# XBMC Plugin
|
||||
# Canale per cineblog01 - anime
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from core import support
|
||||
|
||||
__channel__ = "cb01anime"
|
||||
host = config.get_channel_url(__channel__)
|
||||
#esclusione degli articoli 'di servizio'
|
||||
blacklist = ['AVVISO IMPORTANTE – CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE']
|
||||
host = support.config.get_channel_url() + '/cb01-anime-cartoon'
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
Blacklist = ['AVVISO IMPORTANTE – CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE','Lista Richieste Up & Re-Up']
|
||||
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
logger.info("[cb01anime.py] mainlist")
|
||||
|
||||
# Main options
|
||||
itemlist = [Item(channel=item.channel,
|
||||
action="list_titles",
|
||||
title="[COLOR azure]Anime - Novita'[/COLOR]",
|
||||
url=host + '/anime',
|
||||
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
|
||||
Item(channel=item.channel,
|
||||
action="genere",
|
||||
title="[COLOR azure]Anime - Per Genere[/COLOR]",
|
||||
url=host + '/anime',
|
||||
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/Genres.png"),
|
||||
Item(channel=item.channel,
|
||||
action="alfabetico",
|
||||
title="[COLOR azure]Anime - Per Lettera A-Z[/COLOR]",
|
||||
url=host + '/anime',
|
||||
thumbnail="http://i.imgur.com/IjCmx5r.png"),
|
||||
Item(channel=item.channel,
|
||||
action="listacompleta",
|
||||
title="[COLOR azure]Anime - Lista Completa[/COLOR]",
|
||||
url="%s/anime/lista-completa-anime-cartoon/" % host,
|
||||
thumbnail="http://i.imgur.com/IjCmx5r.png"),
|
||||
Item(channel=item.channel,
|
||||
action="search",
|
||||
title="[COLOR yellow]Cerca Anime[/COLOR]",
|
||||
extra="anime",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
|
||||
return itemlist
|
||||
anime = [('Genere',['','menu', '2']),
|
||||
('Per Lettera',['','menu', '1']),
|
||||
('Per Anno',['','menu', '3']),
|
||||
('Ultimi Anime Aggiornati',['','peliculas', 'newest'])]
|
||||
return locals()
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def genere(item):
|
||||
logger.info("[cb01anime.py] genere")
|
||||
|
||||
return build_itemlist(item, '<select name="select2"(.*?)</select>', '<option value="([^"]+)">([^<]+)</option>',
|
||||
"list_titles")
|
||||
@support.scrape
|
||||
def menu(item):
|
||||
blacklist = ['Anime per Genere', 'Anime per Anno', 'Anime per Lettera']
|
||||
patronBlock = r'<select name="select%s"(?P<block>.*?)</select>' % item.args
|
||||
patronMenu = r'<option value="(?P<url>[^"]+)">(?P<title>[^<]+)</option>'
|
||||
action = 'peliculas'
|
||||
def itemHook(item):
|
||||
item.url = item.url.replace('cb01-anime/','cb01-anime-cartoon/')
|
||||
return item
|
||||
return locals()
|
||||
|
||||
|
||||
def alfabetico(item):
|
||||
logger.info("[cb01anime.py] alfabetico")
|
||||
|
||||
return build_itemlist(item, '<option value=\'-1\'>Anime per Lettera</option>(.*?)</select>',
|
||||
'<option value="([^"]+)">\(([^<]+)\)</option>', "list_titles")
|
||||
|
||||
|
||||
def listacompleta(item):
|
||||
logger.info("[cb01anime.py] listacompleta")
|
||||
|
||||
return build_itemlist(item,
|
||||
'<a href="#char_5a" title="Go to the letter Z">Z</a></span></div>(.*?)</ul></div><div style="clear:both;"></div></div>',
|
||||
'<li><a href="' + host + '/([^"]+)"><span class="head">([^<]+)</span></a></li>', "episodios")
|
||||
|
||||
|
||||
def build_itemlist(item, re_bloque, re_patron, iaction):
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Narrow search by selecting only the combo
|
||||
bloque = scrapertools.find_single_match(data, re_bloque)
|
||||
|
||||
# The categories are the options for the combo
|
||||
matches = re.compile(re_patron, re.DOTALL).findall(bloque)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
for url, titulo in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action=iaction,
|
||||
contentType="tvshow",
|
||||
title=titulo,
|
||||
fulltitle=titulo,
|
||||
text_color="azure",
|
||||
show=titulo,
|
||||
url=host + url,
|
||||
plot=""))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def search(item, texto):
|
||||
logger.info("[cb01anime.py] " + item.url + " search " + texto)
|
||||
|
||||
item.url = host + "/anime/?s=" + texto
|
||||
|
||||
return list_titles(item)
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def list_titles(item):
|
||||
logger.info("[cb01anime.py] mainlist")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patronvideos = r'<div class="span4">\s*<a href="([^"]+)">'
|
||||
patronvideos += r'<img src="([^"]+)"[^>]+><\/a>[^>]+>[^>]+>'
|
||||
patronvideos += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<\/a>'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
|
||||
if not scrapedtitle in blacklist:
|
||||
if 'lista richieste' in scrapedtitle.lower(): continue
|
||||
|
||||
patron = r'(?:\[[Ff][Uu][Ll]{2}\s*[Ii][Tt][Aa]\]|\[[Ss][Uu][Bb]\s*[Ii][Tt][Aa]\])'
|
||||
cleantitle = re.sub(patron, '', scrapedtitle).strip()
|
||||
|
||||
## ------------------------------------------------
|
||||
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
|
||||
## ------------------------------------------------
|
||||
|
||||
# Añade al listado de XBMC
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="listacompleta" if "Lista Alfabetica Completa Anime/Cartoon" in scrapedtitle else "episodios",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
fulltitle=cleantitle,
|
||||
text_color="azure",
|
||||
show=cleantitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
viewmode="movie_with_plot"))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Put the next page mark
|
||||
support.info(texto)
|
||||
item.url = host + "/search/" + texto
|
||||
try:
|
||||
next_page = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)'")
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="list_titles",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=next_page,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
|
||||
return peliculas(item)
|
||||
except:
|
||||
pass
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.info('search log:', line)
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
support.info(categoria)
|
||||
itemlist = []
|
||||
item = support.Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = host
|
||||
item.args = 'newest'
|
||||
itemlist = peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
# debug=True
|
||||
blacklist = Blacklist
|
||||
item.contentType = 'tvshow'
|
||||
if item.args == 'newest':
|
||||
patron = r'<div id="blockvids">\s*<ul>\s*<li>\s*<a href="(?P<url>[^"]+)"[^>]+><img[^>]+src="(?P<thumb>[^"]+)"[^>]*>(?:[^>]+>){4}(?P<title>[^\[]+)\[(?P<lang>[^\]]+)\]'
|
||||
else:
|
||||
patron = r'<div class="span4">\s*<a href="(?P<url>[^"]+)"><img src="(?P<thumb>[^"]+)"[^>]+><\/a>(?:[^>]+>){7}\s*<h1>(?P<title>[^<\[]+)(?:\[(?P<lang>[^\]]+)\])?</h1></a>.*?-->(?:.*?<br(?: /)?>)?\s*(?P<plot>[^<]+)'
|
||||
patronNext = r'<link rel="next" href="([^"]+)"'
|
||||
action = 'check'
|
||||
return locals()
|
||||
|
||||
# =================================================================
|
||||
def check(item):
|
||||
# support.dbg()
|
||||
item.url = support.match(item, patron=r'(?:<p>|/>)(.*?)(?:<br|</td>|</p>)', patronBlock=r'Streaming:(.*?)</tr>').matches
|
||||
if 'Episodio' in str(item.url):
|
||||
item.contentType = 'tvshow'
|
||||
item.action ='episodios'
|
||||
return episodios(item)
|
||||
else:
|
||||
item.contentType = 'movie'
|
||||
item.action = 'findvideos'
|
||||
return findvideos(item)
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
logger.info("[cb01anime.py] episodios")
|
||||
support.info('EPISODIOS ', item.data)
|
||||
data = ''
|
||||
matches = item.data
|
||||
season = 1
|
||||
s = 1
|
||||
e = 0
|
||||
sp = 0
|
||||
|
||||
itemlist = []
|
||||
for match in item.url:
|
||||
if 'stagione' in match.lower():
|
||||
find_season = support.match(match, patron=r'Stagione\s*(\d+)').match
|
||||
season = int(find_season) if find_season else season + 1 if 'prima' not in match.lower() else season
|
||||
else:
|
||||
try: title = support.match(match, patron=r'<a[^>]+>([^<]+)</a>').match
|
||||
except: title = ''
|
||||
if title:
|
||||
if 'episodio' in title.lower():
|
||||
ep = support.match(match, patron=r'Episodio ((?:\d+.\d|\d+|\D+))').match
|
||||
check = ep.isdigit()
|
||||
if check or '.' in ep:
|
||||
if '.' in ep:
|
||||
sp += 1
|
||||
title = '0' + 'x' + str(sp).zfill(2) + ' - ' + title
|
||||
else:
|
||||
ep = int(ep)
|
||||
if season > s and ep > 1:
|
||||
s += 1
|
||||
e = ep - 1
|
||||
title = str(season) + 'x' + str(ep-e).zfill(2) + ' - ' + title
|
||||
data += title + '|' + match + '\|'
|
||||
else:
|
||||
title += ' #movie'
|
||||
data += title + '|' + match + '\|'
|
||||
def itemHook(item):
|
||||
if '#movie' in item.title:
|
||||
item.contentType='movie'
|
||||
item.title = item.title.replace(' #movie','')
|
||||
return item
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
# data = scrapertools.decodeHtmlentities(data)
|
||||
patron = r'(?P<title>[^\|]+)\|(?P<url>[^\|]+)\|'
|
||||
action = 'findvideos'
|
||||
return locals()
|
||||
|
||||
patron1 = '(?:<p>|<td bgcolor="#ECEAE1">)<span class="txt_dow">(.*?)(?:</p>)?(?:\s*</span>)?\s*</td>'
|
||||
patron2 = '<a.*?href="([^"]+)"[^>]*>([^<]+)</a>'
|
||||
matches1 = re.compile(patron1, re.DOTALL).findall(data)
|
||||
if len(matches1) > 0:
|
||||
for match1 in re.split('<br />|<p>', matches1[0]):
|
||||
if len(match1) > 0:
|
||||
# Estrae i contenuti
|
||||
titulo = None
|
||||
scrapedurl = ''
|
||||
matches2 = re.compile(patron2, re.DOTALL).finditer(match1)
|
||||
for match2 in matches2:
|
||||
if titulo is None:
|
||||
titulo = match2.group(2)
|
||||
scrapedurl += match2.group(1) + '#' + match2.group(2) + '|'
|
||||
if titulo is not None:
|
||||
title = item.title + " " + titulo
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
title=title,
|
||||
extra=scrapedurl,
|
||||
fulltitle=item.fulltitle,
|
||||
show=item.show))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def findvideos(item):
|
||||
logger.info("[cb01anime.py] findvideos")
|
||||
|
||||
itemlist = []
|
||||
|
||||
for match in item.extra.split(r'|'):
|
||||
match_split = match.split(r'#')
|
||||
scrapedurl = match_split[0]
|
||||
if len(scrapedurl) > 0:
|
||||
scrapedtitle = match_split[1]
|
||||
title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="play",
|
||||
title=title,
|
||||
url=scrapedurl,
|
||||
fulltitle=item.fulltitle,
|
||||
show=item.show,
|
||||
ontentType=item.contentType,
|
||||
folder=False))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def play(item):
|
||||
logger.info("[cb01anime.py] play")
|
||||
|
||||
if '/goto/' in item.url:
|
||||
item.url = item.url.split('/goto/')[-1].decode('base64')
|
||||
|
||||
data = item.url
|
||||
|
||||
logger.debug("##### Play data ##\n%s\n##" % data)
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.show
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.show = item.show
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
return itemlist
|
||||
return support.server(item, item.url)
|
||||
|
||||
|
||||
63
channels/cineblog01.json
Normal file → Executable file
63
channels/cineblog01.json
Normal file → Executable file
@@ -1,65 +1,10 @@
|
||||
{
|
||||
"id": "cineblog01",
|
||||
"name": "CB01",
|
||||
"language": ["ita"],
|
||||
"language": ["ita", "sub-ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "cb01.png",
|
||||
"banner": "cb01.png",
|
||||
"categories": ["tvshow", "movie", "vosi"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in Ricerca Globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"Italiano"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
"categories": ["tvshow", "movie", "vos", "documentary"],
|
||||
"settings": []
|
||||
}
|
||||
460
channels/cineblog01.py
Normal file → Executable file
460
channels/cineblog01.py
Normal file → Executable file
@@ -2,98 +2,88 @@
|
||||
# ------------------------------------------------------------
|
||||
# Canale per cineblog01
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from core import scrapertoolsV2, httptools, servertools, tmdb, support
|
||||
from core.item import Item
|
||||
from lib import unshortenit
|
||||
from core import scrapertools, httptools, servertools, support
|
||||
from platformcode import logger, config
|
||||
from specials import autoplay
|
||||
|
||||
#impostati dinamicamente da getUrl()
|
||||
host = ""
|
||||
headers = ""
|
||||
|
||||
|
||||
def findhost():
|
||||
global host, headers
|
||||
permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
|
||||
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
|
||||
headers = [['Referer', host]]
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['verystream', 'openload', 'streamango', 'wstream']
|
||||
list_quality = ['HD', 'SD', 'default']
|
||||
|
||||
checklinks = config.get_setting('checklinks', 'cineblog01')
|
||||
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
|
||||
|
||||
# esclusione degli articoli 'di servizio'
|
||||
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ',
|
||||
'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬',
|
||||
'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?']
|
||||
def findhost(url):
|
||||
host = httptools.downloadpage(url, follow_redirect=True).url
|
||||
if host == 'https://cb01.uno/':
|
||||
host = support.match(host, patron=r'<a href="([^"]+)').match
|
||||
return host
|
||||
|
||||
|
||||
host = config.get_channel_url(findhost)
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
findhost()
|
||||
film = [
|
||||
('HD', ['', 'menu', 'Film HD Streaming']),
|
||||
('Genere', ['', 'menu', 'Film per Genere']),
|
||||
('Anni', ['', 'menu', 'Film per Anno']),
|
||||
('Popolari per Genere', ['', 'menu', 'Film Popolari']),
|
||||
('Ultimi Aggiunti', ['/ultimi-100-film-aggiunti/', 'peliculas', 'newest']),
|
||||
('Popolari', ['/category/film-popolari/']),
|
||||
('Italiani', ['/category/nazione/italia/'])
|
||||
# ('Film in Lista', ['/lista-film/', 'peliculas', 'newest'])
|
||||
]
|
||||
tvshow = ['/serietv/',
|
||||
('Per Lettera', ['/serietv/', 'menu', 'Serie-TV x Lettera']),
|
||||
('Per Genere', ['/serietv/', 'menu', 'Serie-TV x Genere']),
|
||||
('Per anno', ['/serietv/', 'menu', 'Serie-TV x Anno']),
|
||||
('Ultime Aggiunte', ['/serietv/ultime-100-serie-tv-aggiunte/', 'peliculas', 'newest'])
|
||||
]
|
||||
docu = [('Documentari {bullet bold}', ['/category/documentario/', 'peliculas']),
|
||||
('HD {submenu} {documentari}', ['/category/hd-alta-definizione/documentario-hd/', 'peliculas'])
|
||||
]
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
# Main options
|
||||
itemlist = []
|
||||
support.menu(itemlist, 'Ultimi 100 Film Aggiornati bold', 'last', host + '/lista-film-ultimi-100-film-aggiornati/')
|
||||
|
||||
support.menu(itemlist, 'Film bold', 'peliculas', host)
|
||||
support.menu(itemlist, 'HD submenu', 'menu', host, args="Film HD Streaming")
|
||||
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
|
||||
support.menu(itemlist, 'Per anno submenu', 'menu', host, args="Film per Anno")
|
||||
support.menu(itemlist, 'Cerca film... submenu', 'search', host, args='film')
|
||||
|
||||
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='tvshow')
|
||||
support.menu(itemlist, 'Aggiornamenti serie tv', 'last', host + '/serietv/aggiornamento-quotidiano-serie-tv/', contentType='tvshow')
|
||||
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Lettera")
|
||||
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Genere")
|
||||
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Anno")
|
||||
support.menu(itemlist, 'Cerca serie... submenu', 'search', host + '/serietv/', contentType='tvshow', args='serie')
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def menu(item):
|
||||
findhost()
|
||||
itemlist= []
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
data = re.sub('\n|\t', '', data)
|
||||
block = scrapertoolsV2.find_single_match(data, item.args + r'<span.*?><\/span>.*?<ul.*?>(.*?)<\/ul>')
|
||||
support.log('MENU BLOCK= ',block)
|
||||
patron = r'href="?([^">]+)"?>(.*?)<\/a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(block)
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title=scrapedtitle,
|
||||
contentType=item.contentType,
|
||||
action='peliculas',
|
||||
url=host + scrapedurl
|
||||
)
|
||||
)
|
||||
|
||||
return support.thumb(itemlist)
|
||||
# debug = True
|
||||
patronBlock = item.args + r'<span.*?><\/span>.*?<ul.*?>(?P<block>.*?)<\/ul>'
|
||||
patronMenu = r'href="?(?P<url>[^">]+)"?[^>]+>(?P<title>[^<»]+)'
|
||||
action = 'peliculas'
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
support.info(categoria)
|
||||
|
||||
item = support.Item()
|
||||
try:
|
||||
if categoria == "series":
|
||||
item.contentType = 'tvshow'
|
||||
item.url = host + '/serietv/' # aggiornamento-quotidiano-serie-tv/'
|
||||
else:
|
||||
item.contentType = 'movie'
|
||||
item.url = host + '/ultimi-100-film-aggiunti/'
|
||||
item.args = "newest"
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
|
||||
def search(item, text):
|
||||
support.log(item.url, "search" ,text)
|
||||
|
||||
logger.info("search", text)
|
||||
if item.contentType == 'tvshow': item.url = host + '/serietv'
|
||||
else: item.url = host
|
||||
try:
|
||||
item.url = item.url + "/?s=" + text
|
||||
item.url = item.url + "/search/" + text.replace(' ', '+')
|
||||
return peliculas(item)
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -101,179 +91,137 @@ def search(item, text):
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
findhost()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
item.contentType = 'movie'
|
||||
item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
|
||||
return support.scrape(item, r'<a href=([^>]+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
|
||||
['url', 'title', 'quality', 'year'],
|
||||
patron_block=r'Ultimi 100 film aggiunti:.*?<\/td>')
|
||||
|
||||
|
||||
def last(item):
|
||||
support.log()
|
||||
|
||||
itemlist = []
|
||||
infoLabels = {}
|
||||
quality = ''
|
||||
PERPAGE = 20
|
||||
page = 1
|
||||
if item.page:
|
||||
page = item.page
|
||||
|
||||
if item.contentType == 'tvshow':
|
||||
matches = support.match(item, r'<a href="([^">]+)".*?>([^(:(|[)]+)([^<]+)<\/a>', '<article class="sequex-post-content.*?</article>', headers)[0]
|
||||
else:
|
||||
matches = support.match(item, r'<a href=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
|
||||
|
||||
for i, (url, title, info) in enumerate(matches):
|
||||
if (page - 1) * PERPAGE > i: continue
|
||||
if i >= page * PERPAGE: break
|
||||
add = True
|
||||
title = title.rstrip()
|
||||
if item.contentType == 'tvshow':
|
||||
for i in itemlist:
|
||||
if i.url == url: # togliamo i doppi
|
||||
add = False
|
||||
else:
|
||||
infoLabels['year'] = scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
|
||||
quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')
|
||||
|
||||
if quality:
|
||||
longtitle = title + support.typo(quality,'_ [] color kod')
|
||||
else:
|
||||
longtitle = title
|
||||
|
||||
if add:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='findvideos' if item.contentType == 'movie' else 'episodios',
|
||||
contentType=item.contentType,
|
||||
title=longtitle,
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
quality=quality,
|
||||
url=url,
|
||||
infoLabels=infoLabels
|
||||
)
|
||||
)
|
||||
support.pagination(itemlist, item, page, PERPAGE)
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
support.log()
|
||||
if item.contentType == 'movie' or '/serietv/' not in item.url:
|
||||
patron = r'<div class="?card-image"?>.*?<img src="?([^" ]+)"? alt.*?<a href="?([^" >]+)(?:\/|")>([^<[(]+)(?:\[([A-Za-z0-9/-]+)])? (?:\(([0-9]{4})\))?.*?<strong>([^<>&]+).*?DURATA ([0-9]+).*?<br(?: /)?>([^<>]+)'
|
||||
listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
|
||||
#debug = True
|
||||
# esclusione degli articoli 'di servizio'
|
||||
# curYear = datetime.date.today().year
|
||||
# blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE ',
|
||||
# 'Aggiornamento Quotidiano Serie TV', 'AVVISO!!!',
|
||||
# 'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?',
|
||||
# 'OSCAR ' + str(curYear) + ' ▶ VOTA IL TUO FILM PREFERITO! 🎬',
|
||||
# 'Auguri di Buon Natale e Felice Anno Nuovo! – ' + str(curYear) + '!']
|
||||
|
||||
if 'newest' in item.args:
|
||||
pagination = ''
|
||||
patronBlock = r'sequex-page-left(?P<block>.*?)sequex-page-right'
|
||||
if '/serietv/' not in item.url:
|
||||
patron = r'src="?(?P<thumb>[^ "]+)"? alt="?(?P<title>.*?)(?:\[(?P<quality>[a-zA-Z]+(?:[/]?3D)?)\]\s*)?(?:\[(?P<lang>Sub-ITA|ITA)\]\s*)?(?:\[(?P<quality2>[a-zA-Z]+(?:[/]?3D)?)\]\s*)?\((?P<year>\d{4})[^\)]*\)[^>]*>.*?<a href=(?:")?(?P<url>[^" ]+)(?:")?.*?rpwe-summary[^>]*>(?P<genre>\w+) [^ ]+ DURATA (?P<duration>[0-9]+)[^ ]+ [^ ]+ [A-Z ]+ (?P<plot>[^<]+)<'
|
||||
action = 'findvideos'
|
||||
else:
|
||||
patron = r'src=(?:")?(?P<thumb>[^ "]+)(?:")? alt=(?:")?(?P<title>.*?)(?: – \d+×\d+)?(?:>|"| – )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>.*?<a href=(?:")?(?P<url>[^" ]+)(?:")?.*?rpwe-summary[^>]*>(?P<genre>[^\(]*)\((?P<year>\d{4})[^\)]*\) (?P<plot>[^<]+)<'
|
||||
action = 'episodios'
|
||||
|
||||
elif '/serietv/' not in item.url:
|
||||
patron = r'(?<!sticky )hentry.*?<div class="card-image">\s*<a[^>]+>\s*<img src="(?P<thumb>[^" ]+)" alt[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="?(?P<url>[^" >]+)(?:\/|"|\s+)>(?P<title>[^<[(]+)(?:\[(?P<quality>[a-zA-Z]+(?:[/]?3D)?)\]\s*)?(?:\[(?P<lang>Sub-ITA|ITA)\]\s*)?(?:\[(?P<quality2>[a-zA-Z/]+)\]\s*)? (?:\((?P<year>[0-9]{4})\))?[^>]+>[^>]+>[^>]+>[^>]+>(?P<genre>[^<>&âÃÂÖ]+)(?:[^ ]+\s*DURATA\s*(?P<duration>[0-9]+)[^>]+>[^>]+>[^>]+>(?P<plot>[^<>]+))?'
|
||||
action = 'findvideos'
|
||||
|
||||
else:
|
||||
patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?</(?:p|div)>(.*?)</div'
|
||||
listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
|
||||
patron = r'(?<!sticky )hentry.*?card-image[^>]*>\s*<a href=(?:")?(?P<url>[^" >]+)(?:")?\s*>\s*<img src=(?:")?(?P<thumb>[^" ]+)(?:")? alt="(?P<title>.*?)(?: – \d+×\d+)?(?:"| – )(?:(?P<lang>Sub-ITA|ITA))?[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>[^>]+>[^>]+>[^>]*>(?P<genre>[^\(]+)\((?P<year>\d{4})[^>]*>[^>]+>[^>]+>[^>]+>(?:<p>)?(?P<plot>[^<]+)'
|
||||
action = 'episodios'
|
||||
item.contentType = 'tvshow'
|
||||
|
||||
return support.scrape(item, patron_block=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
|
||||
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)'],
|
||||
patron=patron, listGroups=listGroups,
|
||||
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">', blacklist=blacklist, action=action)
|
||||
patronNext = '<a class="?page-link"? href="?([^>"]+)"?><i class="fa fa-angle-right">'
|
||||
|
||||
def itemHook(item):
|
||||
if item.quality2:
|
||||
item.quality = item.quality2
|
||||
item.title += support.typo(item.quality2, '_ [] color kod')
|
||||
return item
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
itemlist = []
|
||||
@support.scrape
|
||||
def folder(item, url):
|
||||
"""
|
||||
Quando c'è un link ad una cartella contenente più stagioni
|
||||
"""
|
||||
if url:
|
||||
data = support.match(url).data
|
||||
actLike = 'episodios'
|
||||
addVideolibrary = False
|
||||
downloadEnabled = False
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
matches = scrapertoolsV2.find_multiple_matches(data,
|
||||
r'(<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>.*?)<div class="spdiv">\[riduci\]</div>')
|
||||
patron = r'<tr><td>(?P<title>[^<]+)<td><span [^>].+?><a [^>]+href="(?P<url>[^"]+)[^>]+>'
|
||||
sceneTitle = True
|
||||
# debug = True
|
||||
|
||||
for match in matches:
|
||||
support.log(match)
|
||||
blocks = scrapertoolsV2.find_multiple_matches(match, '(?:<p>)(.*?)(?:</p>|<br)')
|
||||
season = scrapertoolsV2.find_single_match(match, r'title="Espandi">.*?STAGIONE\s+\d+([^<>]+)').strip()
|
||||
def itemHook(item):
|
||||
item.serieFolder = True
|
||||
return item
|
||||
return locals()
|
||||
|
||||
for block in blocks:
|
||||
episode = scrapertoolsV2.find_single_match(block, r'([0-9]+(?:×|×)[0-9]+)').strip()
|
||||
seasons_n = scrapertoolsV2.find_single_match(block, r'<strong>STAGIONE\s+\d+([^<>]+)').strip()
|
||||
# debugBlock=True
|
||||
data = support.match(item.url, headers=headers).data
|
||||
folderItemlist = folder(item, scrapertools.find_single_match(data, r'TUTT[EA] L[EA] \w+\s+(?:–|-)\s+<a href="?([^" ]+)'))
|
||||
|
||||
if seasons_n:
|
||||
season = seasons_n
|
||||
patronBlock = r'(?P<block>sp-head[^>]+>\s*(?:STAGION[EI]\s*(?:(?:DA)?\s*[0-9]+\s*A)?\s*[0-9]+|MINISSERIE)(?::\s*PARTE\s*[0-9]+)? - (?P<lang>[^-<]+)(?:- (?P<quality>[^-<]+))?.*?<\/div>.*?)spdiv[^>]*>'
|
||||
patron = r'(?:/>|<p>|<strong>)(?P<other>.*?(?P<episode>[0-9]+(?:×|ÃÂ)[0-9]+)\s*(?P<title2>.*?)?(?:\s*–|\s*-|\s*<).*?)(?:<\/p>|<br)'
|
||||
def itemlistHook(itemlist):
|
||||
title_dict = {}
|
||||
itlist = []
|
||||
for i in itemlist:
|
||||
i.url = item.url
|
||||
i.title = re.sub(r'\.(\D)',' \\1', i.title)
|
||||
match = support.match(i.title, patron=r'(\d+.\d+)').match.replace('x','')
|
||||
i.order = match
|
||||
if match not in title_dict:
|
||||
title_dict[match] = i
|
||||
elif match in title_dict and i.contentLanguage == title_dict[match].contentLanguage \
|
||||
or i.contentLanguage == 'ITA' and not title_dict[match].contentLanguage \
|
||||
or title_dict[match].contentLanguage == 'ITA' and not i.contentLanguage:
|
||||
title_dict[match].url = i.url
|
||||
else:
|
||||
title_dict[match + '1'] = i
|
||||
|
||||
if not episode: continue
|
||||
for key, value in title_dict.items():
|
||||
itlist.append(value)
|
||||
|
||||
season = re.sub(r'–|–', "-", season)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType='episode',
|
||||
title="[B]" + episode + "[/B] " + season,
|
||||
fulltitle=episode + " " + season,
|
||||
show=episode + " " + season,
|
||||
url=block,
|
||||
extra=item.extra,
|
||||
thumbnail=item.thumbnail,
|
||||
infoLabels=item.infoLabels
|
||||
))
|
||||
itlist = sorted(itlist, key=lambda it: (it.contentLanguage, int(it.order)))
|
||||
|
||||
support.videolibrary(itemlist, item)
|
||||
itlist.extend(folderItemlist)
|
||||
|
||||
return itemlist
|
||||
return itlist
|
||||
return locals()
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
findhost()
|
||||
|
||||
if item.serieFolder:
|
||||
return support.server(item, data=item.url)
|
||||
if item.contentType == "episode":
|
||||
return findvid_serie(item)
|
||||
|
||||
def load_links(itemlist, re_txt, color, desc_txt, quality=""):
|
||||
streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '')
|
||||
support.log('STREAMING=', streaming)
|
||||
patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(streaming)
|
||||
def load_links(itemlist, re_txt, desc_txt, quality=""):
|
||||
streaming = scrapertools.find_single_match(data, re_txt).replace('"', '')
|
||||
logger.debug('STREAMING=', streaming)
|
||||
matches = support.match(streaming, patron = r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="play",
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
server=scrapedtitle,
|
||||
fulltitle=item.fulltitle,
|
||||
thumbnail=item.thumbnail,
|
||||
show=item.show,
|
||||
quality=quality,
|
||||
contentType=item.contentType,
|
||||
folder=False))
|
||||
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, server=scrapedtitle, quality=quality))
|
||||
|
||||
support.log()
|
||||
logger.debug()
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub('\n|\t','',data)
|
||||
|
||||
# Extract the quality format
|
||||
patronvideos = '>([^<]+)</strong></div>'
|
||||
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
|
||||
QualityStr = ""
|
||||
for match in matches:
|
||||
QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]
|
||||
data = re.sub('\n|\t', '', data)
|
||||
|
||||
# Estrae i contenuti - Streaming
|
||||
load_links(itemlist, '<strong>Streaming:</strong>(.*?)<tableclass=cbtable height=30>', "orange", "Streaming", "SD")
|
||||
load_links(itemlist, '<strong>Streamin?g:</strong>(.*?)cbtable', "Streaming", "SD")
|
||||
|
||||
# Estrae i contenuti - Streaming HD
|
||||
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "yellow", "Streaming HD", "HD")
|
||||
load_links(itemlist, '<strong>Streamin?g HD[^<]+</strong>(.*?)cbtable', "Streaming HD", "HD")
|
||||
|
||||
# Estrae i contenuti - Streaming 3D
|
||||
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "pink", "Streaming 3D")
|
||||
load_links(itemlist, '<strong>Streamin?g 3D[^<]+</strong>(.*?)cbtable', "Streaming 3D")
|
||||
|
||||
return support.server(item, itemlist=itemlist)
|
||||
# Extract the quality format
|
||||
patronvideos = r'([\w.]+)</strong></div></td>'
|
||||
return support.server(item, itemlist=itemlist, patronTag=patronvideos)
|
||||
|
||||
# Estrae i contenuti - Download
|
||||
# load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")
|
||||
@@ -283,94 +231,12 @@ def findvideos(item):
|
||||
|
||||
|
||||
def findvid_serie(item):
|
||||
def load_vid_series(html, item, itemlist, blktxt):
|
||||
logger.info('HTML' + html)
|
||||
patron = '<a href="([^"]+)"[^=]+="_blank"[^>]+>(.*?)</a>'
|
||||
# Estrae i contenuti
|
||||
matches = re.compile(patron, re.DOTALL).finditer(html)
|
||||
for match in matches:
|
||||
scrapedurl = match.group(1)
|
||||
scrapedtitle = match.group(2)
|
||||
# title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="play",
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
server=scrapedtitle,
|
||||
fulltitle=item.fulltitle,
|
||||
show=item.show,
|
||||
contentType=item.contentType,
|
||||
folder=False))
|
||||
logger.debug()
|
||||
data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:×|Ã)[0-9]+[^<]+)', '', item.other)
|
||||
|
||||
support.log()
|
||||
|
||||
itemlist = []
|
||||
lnkblk = []
|
||||
lnkblkp = []
|
||||
|
||||
data = item.url
|
||||
|
||||
# First blocks of links
|
||||
if data[0:data.find('<a')].find(':') > 0:
|
||||
lnkblk.append(data[data.find(' - ') + 3:data[0:data.find('<a')].find(':') + 1])
|
||||
lnkblkp.append(data.find(' - ') + 3)
|
||||
else:
|
||||
lnkblk.append(' ')
|
||||
lnkblkp.append(data.find('<a'))
|
||||
|
||||
# Find new blocks of links
|
||||
patron = r'<a\s[^>]+>[^<]+</a>([^<]+)'
|
||||
matches = re.compile(patron, re.DOTALL).finditer(data)
|
||||
for match in matches:
|
||||
sep = match.group(1)
|
||||
if sep != ' - ':
|
||||
lnkblk.append(sep)
|
||||
|
||||
i = 0
|
||||
if len(lnkblk) > 1:
|
||||
for lb in lnkblk[1:]:
|
||||
lnkblkp.append(data.find(lb, lnkblkp[i] + len(lnkblk[i])))
|
||||
i = i + 1
|
||||
|
||||
for i in range(0, len(lnkblk)):
|
||||
if i == len(lnkblk) - 1:
|
||||
load_vid_series(data[lnkblkp[i]:], item, itemlist, lnkblk[i])
|
||||
else:
|
||||
load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i])
|
||||
|
||||
return support.server(item, itemlist=itemlist)
|
||||
return support.server(item, data=data)
|
||||
|
||||
|
||||
def play(item):
|
||||
support.log()
|
||||
itemlist = []
|
||||
### Handling new cb01 wrapper
|
||||
if host[9:] + "/film/" in item.url:
|
||||
iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
|
||||
support.log("/film/ wrapper: ", iurl)
|
||||
if iurl:
|
||||
item.url = iurl
|
||||
|
||||
if '/goto/' in item.url:
|
||||
item.url = item.url.split('/goto/')[-1].decode('base64')
|
||||
|
||||
item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')
|
||||
|
||||
logger.debug("##############################################################")
|
||||
if "go.php" in item.url:
|
||||
data = httptools.downloadpage(item.url).data
|
||||
if "window.location.href" in data:
|
||||
try:
|
||||
data = scrapertoolsV2.find_single_match(data, 'window.location.href = "([^"]+)";')
|
||||
except IndexError:
|
||||
data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
|
||||
data, c = unshortenit.unwrap_30x_only(data)
|
||||
else:
|
||||
data = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
|
||||
|
||||
logger.debug("##### play go.php data ##\n%s\n##" % data)
|
||||
else:
|
||||
data = support.swzz_get_url(item)
|
||||
|
||||
return servertools.find_video_items(data=data)
|
||||
logger.debug()
|
||||
return servertools.find_video_items(item, data=item.url)
|
||||
|
||||
87
channels/cinemalibero.json
Normal file → Executable file
87
channels/cinemalibero.json
Normal file → Executable file
@@ -2,85 +2,10 @@
|
||||
"id": "cinemalibero",
|
||||
"name": "Cinemalibero",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
|
||||
"banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
|
||||
"categories": ["tvshow", "movie","anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "channel_host",
|
||||
"type": "text",
|
||||
"label": "Host del canale",
|
||||
"default": "https://www.cinemalibero.fun/",
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Anime",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
"active": false,
|
||||
"thumbnail": "cinemalibero.png",
|
||||
"banner": "cinemalibero.png",
|
||||
"categories": ["movie","tvshow","anime"],
|
||||
"not_active": ["include_in_newest_anime", "include_in_newest_peliculas"],
|
||||
"settings": []
|
||||
}
|
||||
|
||||
550
channels/cinemalibero.py
Normal file → Executable file
550
channels/cinemalibero.py
Normal file → Executable file
@@ -1,61 +1,192 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per CinemaLibero - First Version
|
||||
# Canale per 'cinemaLibero'
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from core import scrapertools, servertools, httptools, support
|
||||
from core import tmdb
|
||||
from core import httptools, support, scrapertools
|
||||
from core.item import Item
|
||||
from lib import unshortenit
|
||||
from platformcode import config
|
||||
from platformcode import logger
|
||||
from specials import autoplay
|
||||
import channelselector
|
||||
from core.support import typo
|
||||
from platformcode import config, logger
|
||||
import sys
|
||||
|
||||
# Necessario per Autoplay
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
|
||||
list_quality = ['default']
|
||||
from platformcode.logger import debug
|
||||
if sys.version_info[0] >= 3:
|
||||
from concurrent import futures
|
||||
else:
|
||||
from concurrent_py2 import futures
|
||||
|
||||
# Necessario per Verifica Link
|
||||
checklinks = config.get_setting('checklinks', 'cinemalibero')
|
||||
checklinks_number = config.get_setting('checklinks_number', 'cinemalibero')
|
||||
|
||||
__channel__ = "cinemalibero"
|
||||
host = config.get_channel_url(__channel__)
|
||||
# rimanda a .today che contiene tutti link a .plus
|
||||
# def findhost(url):
|
||||
# permUrl = httptools.downloadpage('https://www.cinemalibero.online/', follow_redirects=False).headers
|
||||
# try:
|
||||
# import urlparse
|
||||
# except:
|
||||
# import urllib.parse as urlparse
|
||||
# p = list(urlparse.urlparse(permUrl['location'].replace('https://www.google.com/search?q=site:', '')))
|
||||
# if not p[0]:
|
||||
# p[0] = 'https'
|
||||
# return urlparse.urlunparse(p)
|
||||
|
||||
host = config.get_channel_url()
|
||||
headers = [['Referer', host]]
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
logger.info('[cinemalibero.py] mainlist')
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality) # Necessario per Autoplay
|
||||
|
||||
# Menu Principale
|
||||
itemlist = []
|
||||
support.menu(itemlist, 'Film bold', 'video', host+'/category/film/')
|
||||
support.menu(itemlist, 'Generi submenu', 'genres', host)
|
||||
support.menu(itemlist, 'Cerca film submenu', 'search', host)
|
||||
support.menu(itemlist, 'Serie TV bold', 'video', host+'/category/serie-tv/', contentType='episode')
|
||||
support.menu(itemlist, 'Anime submenu', 'video', host+'/category/anime-giapponesi/', contentType='episode')
|
||||
support.menu(itemlist, 'Cerca serie submenu', 'search', host, contentType='episode')
|
||||
support.menu(itemlist, 'Sport bold', 'video', host+'/category/sport/')
|
||||
film = ['/category/film/',
|
||||
('Novità', ['', 'peliculas', 'update']),
|
||||
('Generi', ['', 'genres'])]
|
||||
|
||||
autoplay.show_option(item.channel, itemlist) # Necessario per Autoplay (Menu Configurazione)
|
||||
tvshow = ['/category/serie-tv/']
|
||||
|
||||
support.channel_config(item, itemlist)
|
||||
|
||||
return itemlist
|
||||
anime = ['/category/anime-giapponesi/']
|
||||
|
||||
## Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', 'sport', 'tvshow'])]
|
||||
news = [('Ultimi episodi Serie/Anime', ['/aggiornamenti-serie-tv/', 'peliculas', 'update', 'tvshow'])]
|
||||
|
||||
search = ''
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
# debug = True
|
||||
action = 'check'
|
||||
patronBlock = r'<div class="container">.*?class="col-md-12[^"]*?">(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
|
||||
if item.args == 'newest':
|
||||
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>.+?(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
|
||||
pagination = 25
|
||||
elif item.contentType == 'movie':
|
||||
# action = 'findvideos'
|
||||
patron = r'<a href="(?P<url>[^"]+)" title="(?P<title>.+?)(?:[ ]\[(?P<lang>[sSuUbB\-iItTaA]+)\])?(?:[ ]\((?P<year>\d{4})?\))?"\s*alt="[^"]+"\s*class="[^"]+"(?: style="background-image: url\((?P<thumb>.+?)\)">)?\s*<div class="voto">[^>]+>[^>]+>.(?P<rating>[\d\.a-zA-Z\/]+)?[^>]+>[^>]+>[^>]+>(?:<div class="genere">(?P<quality>[^<]+)</div>)?'
|
||||
if item.args == 'update':
|
||||
patronBlock = r'<section id="slider">(?P<block>.*?)</section>'
|
||||
patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})?))/".+?url\((?P<thumb>[^\)]+)\)">'
|
||||
elif item.contentType == 'tvshow':
|
||||
# action = 'episodios'
|
||||
if item.args == 'update':
|
||||
patron = r'<a href="(?P<url>[^"]+)"[^<]+?url\((?P<thumb>.+?)\)">\s*?<div class="titolo">(?P<title>.+?)(?: – Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>\s*?(?:<div class="genere">)?(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
|
||||
pagination = 25
|
||||
else:
|
||||
patron = r'<a href="(?P<url>[^"]+)"\s*title="(?P<title>[^"\(]+)(?:"|\()(?:(?P<year>\d+)[^"]+)?.*?url\((?P<thumb>[^\)]+)\)(?:.*?<div class="voto">[^>]+>[^>]+>\s*(?P<rating>[^<]+))?.*?<div class="titolo">[^>]+>(?:<div class="genere">[^ ]*(?:\s\d+)?\s*(?:\()?(?P<lang>[^\)< ]+))?'
|
||||
else:
|
||||
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
|
||||
|
||||
def itemHook(item):
|
||||
if 'sub' in item.contentLanguage.lower() and not 'ita' in item.contentLanguage.lower():
|
||||
item.contentLanguage= 'Sub-ITA'
|
||||
item.title = re.sub('[Ss]ub(?:-)?', item.contentLanguage, item.title)
|
||||
if item.lang2:
|
||||
if len(item.lang2)<3:
|
||||
item.lang2 = 'ITA'
|
||||
item.contentLanguage = item.lang2
|
||||
item.title += support.typo(item.lang2, '_ [] color kod')
|
||||
if item.args == 'update':
|
||||
item.title = item.title.replace('-', ' ')
|
||||
# if item.args == 'search':
|
||||
# item.contentType = 'tvshow' if 'serie-' in item.url else 'movie'
|
||||
|
||||
return item
|
||||
|
||||
patronNext = r'<a class="next page-numbers".*?href="([^"]+)">'
|
||||
return locals()
|
||||
|
||||
@support.scrape
|
||||
def episodios(item):
|
||||
data = item.data
|
||||
# debug=True
|
||||
|
||||
if item.args == 'anime':
|
||||
logger.debug("Anime :", item)
|
||||
patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))|(?P<data>.+)'
|
||||
patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)'
|
||||
item.contentType = 'tvshow'
|
||||
elif item.args == 'sport':
|
||||
logger.debug("Sport :", item)
|
||||
patron = r'(?:/>|<p>)\s*(?P<title>[^-]+)-(?P<data>.+?)(?:<br|</p)'
|
||||
patronBlock = r'</strong>\s*</p>(?P<block>.*?</p>)'
|
||||
item.contentType = 'tvshow'
|
||||
elif item.args == 'serie' or item.contentType == 'tvshow':
|
||||
logger.debug("Serie :", item)
|
||||
patron = r'(?:/>|<p>)\s*(?:(?P<episode>\d+(?:x|×|×)\d+|Puntata \d+)(?:-(?P<episode2>\d+))?[;]?[ ]?(?P<title>[^<–-]+))?(?P<data>.*?)(?:<br|</p)'
|
||||
patronBlock = r'Stagione\s(?:[Uu]nica)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:strong>|<div class="at-below)'
|
||||
item.contentType = 'tvshow'
|
||||
else:
|
||||
patron = r'(?P<title>\s*[0-9]{2}/[0-9]{2}/[0-9]{4})(?P<data>.*?)(?:<br|</p)'
|
||||
|
||||
def itemHook(it):
|
||||
if not scrapertools.find_single_match(it.title, r'(\d+x\d+)'):
|
||||
it.title = re.sub(r'(\d+) -', '1x\\1', it.title)
|
||||
return it
|
||||
|
||||
def itemlistHook(itl):
|
||||
ret = []
|
||||
if item.args == 'sport':
|
||||
return itl
|
||||
# support.dbg()
|
||||
for it in itl:
|
||||
ep = scrapertools.find_single_match(it.title, r'(\d+x\d+)')
|
||||
if not ep and 'http' in it.data: # stagione intera
|
||||
# from lib import unshortenit
|
||||
# data = unshortenit.findlinks(it.data)
|
||||
episodes = {}
|
||||
|
||||
def get_ep(s):
|
||||
srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
|
||||
if hasattr(srv_mod, 'get_filename'):
|
||||
title = srv_mod.get_filename(s.url)
|
||||
if item.args == 'anime':
|
||||
ep = title
|
||||
else:
|
||||
ep = scrapertools.get_season_and_episode(title)
|
||||
if ep:
|
||||
if ep not in episodes:
|
||||
episodes[ep] = []
|
||||
episodes[ep].append(s)
|
||||
|
||||
servers = support.server(item, it.data, CheckLinks=False, Download=False, Videolibrary=False)
|
||||
# for s in servers:
|
||||
# get_ep(s)
|
||||
# ottengo l'episodio dal nome del file
|
||||
with futures.ThreadPoolExecutor() as executor:
|
||||
for s in servers:
|
||||
executor.submit(get_ep, s)
|
||||
# logger.debug(it.contentLanguage)
|
||||
if item.args != 'anime':
|
||||
for ep in episodes:
|
||||
ret.append(it.clone(title=typo(ep, 'bold') + typo(it.contentLanguage, '_ [] color kod bold'),
|
||||
servers=[srv.tourl() for srv in episodes[ep]], contentSeason=int(ep.split('x')[0]), contentEpisodeNumber=int(ep.split('x')[1])))
|
||||
else:
|
||||
ret.extend([it.clone(title=typo(ep, 'bold') + typo(it.contentLanguage, '_ [] color kod bold'),
|
||||
servers=[srv.tourl() for srv in episodes[ep]]) for ep in episodes])
|
||||
elif ep:
|
||||
ret.append(it)
|
||||
return sorted(ret, key=lambda i: i.title)
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def genres(item):
|
||||
action='peliculas'
|
||||
patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>'
|
||||
patronMenu=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
|
||||
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[cinemalibero.py] " + item.url + " search " + texto)
|
||||
logger.debug(item.url,texto)
|
||||
texto = texto.replace(' ', '+')
|
||||
item.url = host + "/?s=" + texto
|
||||
# item.contentType = 'tv'
|
||||
item.args = 'search'
|
||||
try:
|
||||
return video(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
@@ -63,264 +194,99 @@ def search(item, texto):
|
||||
return []
|
||||
|
||||
|
||||
def genres(item):
|
||||
return support.scrape(item, patron_block=r'<div id="bordobar" class="dropdown-menu(.*?)</li>', patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', listGroups=['url', 'title'], action='video')
|
||||
|
||||
|
||||
def video(item):
|
||||
logger.info('[cinemalibero.py] video')
|
||||
def newest(categoria):
|
||||
logger.debug('newest ->', categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
item.args = 'newest'
|
||||
try:
|
||||
if categoria == 'series' or categoria == 'anime':
|
||||
item.args = 'update'
|
||||
item.url = host+'/aggiornamenti-serie-tv/'
|
||||
item.contentType = 'tvshow'
|
||||
item.action = 'peliculas'
|
||||
itemlist = peliculas(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error('newest log: ', (line))
|
||||
return []
|
||||
|
||||
if host not in item.url:
|
||||
item.url = host + item.url
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
|
||||
block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12[^"]*?">(.*?)<div class=(?:"container"|"bg-dark ")>')
|
||||
|
||||
# Estrae i contenuti
|
||||
matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block)
|
||||
|
||||
for match in matches:
|
||||
url = scrapertools.find_single_match(match, r'href="([^"]+)"')
|
||||
long_title = scrapertools.find_single_match(match, r'<div class="titolo">([^<]+)<\/div>')
|
||||
thumb = scrapertools.find_single_match(match, r'url=\((.*?)\)')
|
||||
quality = scrapertools.find_single_match(match, r'<div class="voto">([^<]+)<\/div>')
|
||||
genere = scrapertools.find_single_match(match, r'<div class="genere">([^<]+)<\/div>')
|
||||
|
||||
year = scrapertools.find_single_match(long_title, r'\(([0-9)]+)') or scrapertools.find_single_match(long_title, r'\) ([0-9)]+)')
|
||||
lang = scrapertools.find_single_match(long_title, r'\(([a-zA-Z)]+)')
|
||||
|
||||
title = re.sub(r'\(.*','',long_title)
|
||||
title = re.sub(r'(?:\(|\))','',title)
|
||||
if genere:
|
||||
genere = ' - [' + genere + ']'
|
||||
if year:
|
||||
long_title = title + ' - ('+ year + ')' + genere
|
||||
if lang:
|
||||
long_title = '[B]' + title + '[/B]' + ' - ('+ lang + ')' + genere
|
||||
else:
|
||||
long_title = '[B]' + title + '[/B]'
|
||||
|
||||
# Seleziona fra Serie TV e Film
|
||||
if item.contentType == 'movie':
|
||||
tipologia = 'movie'
|
||||
action = 'findvideos'
|
||||
elif item.contentType == 'episode':
|
||||
tipologia = 'tv'
|
||||
action = 'episodios'
|
||||
else:
|
||||
tipologia = 'movie'
|
||||
action = 'select'
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action=action,
|
||||
contentType=item.contentType,
|
||||
title=long_title,
|
||||
fulltitle=title,
|
||||
quality=quality,
|
||||
url=url,
|
||||
thumbnail=thumb,
|
||||
infoLabels={'year': year},
|
||||
show=title))
|
||||
|
||||
# Next page
|
||||
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">')
|
||||
|
||||
if next_page != '':
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='video',
|
||||
title='[B]' + config.get_localized_string(30992) + ' »[/B]',
|
||||
url=next_page,
|
||||
contentType=item.contentType,
|
||||
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
return itemlist
|
||||
|
||||
|
||||
def select(item):
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
|
||||
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
|
||||
logger.info('select = ### è una serie ###')
|
||||
return episodios(Item(channel=item.channel,
|
||||
title=item.title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=item.url,
|
||||
extra='serie',
|
||||
contentType='episode'))
|
||||
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
|
||||
if re.findall('episodio', block, re.IGNORECASE):
|
||||
logger.info('select = ### è un anime ###')
|
||||
return episodios(Item(channel=item.channel,
|
||||
title=item.title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=item.url,
|
||||
extra='anime',
|
||||
contentType='episode'))
|
||||
def check(item):
|
||||
data = support.match(item.url, headers=headers).data
|
||||
|
||||
if data:
|
||||
ck = str(support.match(data, patronBlock=r'Genere:(.*?)</span>', patron=r'tag">([^<]+)').matches).lower()
|
||||
|
||||
if 'serie tv' in ck or 'anime' in ck or 'wrestling wwe' in ck :# in ['serie tv', 'wrestling wwe', 'anime']:
|
||||
if 'anime' in ck:
|
||||
item.args = 'anime'
|
||||
elif 'sport' in ck or 'wrestling' in ck:
|
||||
item.args = 'sport'
|
||||
else:
|
||||
item.args = 'serie'
|
||||
item.contentType = 'tvshow'
|
||||
item.data = data
|
||||
itemlist = episodios(item)
|
||||
if not itemlist:
|
||||
item.data = data
|
||||
return findvideos(item)
|
||||
else:
|
||||
logger.info('select = ### è un film ###')
|
||||
return findvideos(Item(channel=item.channel,
|
||||
title=item.title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=item.url,
|
||||
contentType='movie'))
|
||||
item.contentType = 'movie'
|
||||
item.data = data
|
||||
# item.action = 'findvideos'
|
||||
return findvideos(item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
def filter_ep(s):
|
||||
srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
|
||||
if hasattr(srv_mod, 'get_filename'):
|
||||
title = srv_mod.get_filename(s.url)
|
||||
# support.dbg()
|
||||
if scrapertools.get_season_and_episode(title) == str(item.contentSeason) + "x" + str(
|
||||
item.contentEpisodeNumber).zfill(2):
|
||||
servers.append(s)
|
||||
logger.debug()
|
||||
# support.dbg()
|
||||
if item.servers:
|
||||
return support.server(item, itemlist=[Item().fromurl(s) for s in item.servers])
|
||||
if not item.data:
|
||||
item.data = httptools.downloadpage(item.url)
|
||||
data = scrapertools.find_single_match(item.data, '<div class="at-above-post addthis_tool"(.*?)(?:<div class="at-below-post|[dD][oO][wW][nN][lL][oO][aA][dD])')
|
||||
if data:
|
||||
item.data = data
|
||||
|
||||
servers = []
|
||||
# if item.args == 'anime':
|
||||
# if item.urls: # this is a episode
|
||||
# return support.server(item, itemlist=[Item(url=support.unshortenit.FileCrypt().unshorten(u)) for u in item.urls])
|
||||
# itemlist = []
|
||||
# episodes = {}
|
||||
# for uri in support.unshortenit.FileCrypt().find(item.data):
|
||||
# for ep in support.unshortenit.FileCrypt(uri).list_files():
|
||||
# ep = ('.'.join(ep[0].split('.')[:-1]), ep[1]) # remove extension
|
||||
# if not ep[0] in episodes:
|
||||
# episodes[ep[0]] = []
|
||||
# episodes[ep[0]].append(ep[1])
|
||||
# for ep in episodes.keys():
|
||||
# itemlist.append(item.clone(title=ep, urls=episodes[ep], action='findvideos', data=''))
|
||||
# return itemlist
|
||||
total_servers = support.server(item, data=item.data)
|
||||
|
||||
if item.contentType == 'episode' and len(set([srv.server for srv in total_servers])) < len([srv.server for srv in total_servers]):
|
||||
# i link contengono più puntate, cerco quindi quella selezionata
|
||||
with futures.ThreadPoolExecutor() as executor:
|
||||
for s in total_servers:
|
||||
if s.server:
|
||||
executor.submit(filter_ep, s)
|
||||
else:
|
||||
servers.append(s)
|
||||
return servers
|
||||
else:
|
||||
logger.info('select = ### è un film ###')
|
||||
return findvideos(Item(channel=item.channel,
|
||||
title=item.title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=item.url,
|
||||
contentType='movie'))
|
||||
|
||||
|
||||
def findvideos(item): # Questa def. deve sempre essere nominata findvideos
|
||||
logger.info('[cinemalibero.py] findvideos')
|
||||
itemlist = []
|
||||
|
||||
if item.args == 'direct':
|
||||
return servertools.find_video_items(item)
|
||||
|
||||
if item.contentType == 'episode':
|
||||
data = item.url.lower()
|
||||
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>')
|
||||
urls = re.findall('<a.*?href="([^"]+)"', block, re.DOTALL)
|
||||
else:
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
data = re.sub(r'\n|\t','',data).lower()
|
||||
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>(.*?)<strong>')
|
||||
urls = re.findall('<a href="([^"]+)".*?class="external"', block, re.DOTALL)
|
||||
|
||||
logger.info('URLS'+ str(urls))
|
||||
if urls:
|
||||
data =''
|
||||
for url in urls:
|
||||
url, c = unshortenit.unshorten(url)
|
||||
data += url + '\n'
|
||||
|
||||
logger.info('DATA'+ data)
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.fulltitle + ' - [COLOR limegreen][[/COLOR]'+videoitem.title+' [COLOR limegreen]][/COLOR]'
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.show = item.show
|
||||
videoitem.plot = item.plot
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
# Link Aggiungi alla Libreria
|
||||
if item.contentType != 'episode':
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
|
||||
action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle))
|
||||
|
||||
# Necessario per filtrare i Link
|
||||
if checklinks:
|
||||
itemlist = servertools.check_list_links(itemlist, checklinks_number)
|
||||
|
||||
# Necessario per FilterTools
|
||||
# itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Necessario per AutoPlay
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item): # Questa def. deve sempre essere nominata episodios
|
||||
logger.info('[cinemalibero.py] episodios')
|
||||
itemlist = []
|
||||
extra = ''
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)at-below-post')
|
||||
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
|
||||
# logger.info('select = ### è una serie ###')
|
||||
extra='serie'
|
||||
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
|
||||
if re.findall('episodi', block, re.IGNORECASE):
|
||||
# logger.info('select = ### è un anime ###')
|
||||
extra='anime'
|
||||
|
||||
block = re.sub(r'<h2>.*?<\/h2>','',block)
|
||||
block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('–<','<').replace('-<','<').replace('–<','<').replace('– <','<').replace('<strong>','<stop><start><strong>')+'<stop>'
|
||||
block = re.sub(r'stagione completa.*?<\/p>','',block,flags=re.IGNORECASE)
|
||||
|
||||
|
||||
if extra == 'serie':
|
||||
block = block.replace('<br /> <a','<a')
|
||||
matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block)
|
||||
|
||||
if not matches:
|
||||
matches = scrapertools.find_multiple_matches(block, r'<a href="([^"]+)"[^>]+>(Episodio [0-9]+)</a>')
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = re.sub(r'Episodio ([0-9]+)', r'Episodio 1x\1', scrapedtitle)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType='episode',
|
||||
title=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
show=item.fulltitle,
|
||||
url=scrapedurl,
|
||||
args='direct'))
|
||||
else:
|
||||
for lang, html in matches:
|
||||
lang = re.sub('<.*?>','',lang)
|
||||
html = html.replace('<br />','\n').replace('</p>', '\n')
|
||||
|
||||
matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
|
||||
for scrapedtitle, html in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType='episode',
|
||||
title=scrapedtitle + ' - (' + lang + ')',
|
||||
fulltitle=scrapedtitle,
|
||||
show=item.fulltitle,
|
||||
url=html))
|
||||
|
||||
elif extra == 'anime':
|
||||
block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block)
|
||||
block = re.sub(r'(?:mirror|Mirror)[^<]+<','',block)
|
||||
block = block.replace('<br />','\n').replace('/a></p>','\n')
|
||||
block = re.sub(r'<start.*?(?:download|Download).*?\n','',block)
|
||||
matches = re.compile('<a(.*?)\n', re.DOTALL).findall(block)
|
||||
for html in matches:
|
||||
scrapedtitle = scrapertools.find_single_match(html, r'>(.*?)<\/a>')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType='episode',
|
||||
title=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
show=item.fulltitle,
|
||||
url=html))
|
||||
|
||||
else:
|
||||
logger.info('select = ### è un film ###')
|
||||
return findvideos(Item(channel=item.channel,
|
||||
title=item.title,
|
||||
fulltitle=item.fulltitle,
|
||||
url=item.url,
|
||||
show=item.fulltitle,
|
||||
contentType='movie'))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist
|
||||
return total_servers
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
{
|
||||
"id": "cinemastreaming",
|
||||
"name": "Cinemastreaming",
|
||||
"language": ["ita"],
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"thumbnail": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
|
||||
"banner": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
|
||||
"categories": ["tvshow", "movie"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
{
|
||||
"id": "checklinks",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "checklinks_number",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare","IT"]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,191 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Canale per cinemastreaming
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import scrapertools, httptools, scrapertoolsV2, support
|
||||
from core.item import Item
|
||||
from specials import autoplay
|
||||
from platformcode import config
|
||||
|
||||
__channel__ = "cinemastreaming"
|
||||
host = config.get_channel_url(__channel__)
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango']
|
||||
list_quality = ['1080p', '1080p 3D', 'SD', 'CAM', 'default']
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
support.log()
|
||||
|
||||
# Menu Principale
|
||||
itemlist = []
|
||||
support.menu(itemlist, 'Film bold', 'peliculas', host + '/film/')
|
||||
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
|
||||
support.menu(itemlist, 'Anime bold', 'peliculas', host + '/category/anime/')
|
||||
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serie-tv/', contentType='episode')
|
||||
support.menu(itemlist, 'Ultime Uscite submenu', 'peliculas', host + "/stagioni/", "episode", args='latests')
|
||||
support.menu(itemlist, 'Ultimi Episodi submenu', 'peliculas_latest_ep', host + "/episodi/", "episode", args='lateste')
|
||||
support.menu(itemlist, '[COLOR blue]Cerca...[/COLOR]', 'search')
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
support.log()
|
||||
list_groups = ["url", "thumb", "title", "year", "rating", "duration"]
|
||||
|
||||
patron = r'<article.*?"TPost C".*?href="([^"]+)".*?img.*?src="([^"]+)".*?<h3.*?>([^<]+).*?Year">'
|
||||
|
||||
if item.args == "latests":
|
||||
patron += r'([^<]+)'
|
||||
else:
|
||||
patron += r'(\d{4}).*?AAIco-star.*?>([^<]+).*?AAIco-access_time">([^<]+).*?Qlty'
|
||||
|
||||
patron_next = r'page-numbers current.*?href="([^"]+)"'
|
||||
|
||||
if item.contentType == "movie":
|
||||
patron += r'\">([^<]+)'
|
||||
list_groups.append("quality")
|
||||
|
||||
action = "findvideos" if item.contentType == "movie" else "episodios"
|
||||
|
||||
return support.scrape(item, patron, list_groups, patronNext=patron_next, action=action)
|
||||
|
||||
|
||||
def peliculas_latest_ep(item):
|
||||
|
||||
patron = r'<article.*?"TPost C".*?href="([^"]+)".*?img.*?src="([^"]+)"'
|
||||
patron += r'.*?class="ClB">([^<]+)<\/span>([^<]+).*?<h3.*?>([^<]+)'
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
itemlist = []
|
||||
for scrapedurl, scrapedthumbnail, scrapednum, scrapedep, scrapedtitle in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType=item.contentType,
|
||||
title="[B]" + scrapednum + "[/B]" + scrapedep + " - " + scrapedtitle,
|
||||
fulltitle=scrapedep + " " + scrapedtitle,
|
||||
show=scrapedep + " " + scrapedtitle,
|
||||
url=scrapedurl,
|
||||
extra=item.extra,
|
||||
thumbnail="http:" + scrapedthumbnail,
|
||||
infoLabels=item.infoLabels
|
||||
))
|
||||
|
||||
support.nextPage(itemlist, item, data, r'page-numbers current.*?href="([^"]+)"')
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas_menu(item):
|
||||
itemlist = peliculas(item)
|
||||
return itemlist[:-1]
|
||||
|
||||
|
||||
def episodios(item):
|
||||
patron = r'<td class="MvTbTtl"><a href="([^"]+)">(.*?)<\/a>.*?>\d{4}<'
|
||||
list_groups = ["url", "title", "year"]
|
||||
|
||||
itemlist = support.scrape(item, patron, list_groups)
|
||||
|
||||
for itm in itemlist:
|
||||
fixedtitle = scrapertools.get_season_and_episode(itm.url)
|
||||
itm.title = fixedtitle + " - " + itm.title
|
||||
itm.fulltitle = fixedtitle + " - " + itm.fulltitle
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def menu(item):
|
||||
patron_block = r'<ul class="sub-menu">.*?</ul>'
|
||||
patron = r'menu-category-list"><a href="([^"]+)">([^<]+)<'
|
||||
list_groups = ["url", "title"]
|
||||
|
||||
return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patron_block=patron_block)
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
support.log("s=", texto)
|
||||
item.url = host + "/?s=" + texto
|
||||
try:
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except Exception, e:
|
||||
import traceback
|
||||
traceback.print_stack()
|
||||
support.log(str(e))
|
||||
return []
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
support.log("newest" + categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "series":
|
||||
item.url = host + "/episodi/"
|
||||
item.action = "peliculas"
|
||||
item.args = "lateste"
|
||||
item.contentType = "episode"
|
||||
itemlist = peliculas(item)
|
||||
|
||||
if itemlist[-1].action == "peliculas":
|
||||
itemlist.pop()
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except Exception, e:
|
||||
import traceback
|
||||
traceback.print_stack()
|
||||
support.log(str(e))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
|
||||
if item.quality.lower() in ["ended", "canceled", "returning series"]:
|
||||
return episodios(item)
|
||||
|
||||
itemlist = []
|
||||
data = scrapertoolsV2.decodeHtmlentities(httptools.downloadpage(item.url).data)
|
||||
btns = re.compile(r'data-tplayernv="Opt.*?><span>([^<]+)</span><span>([^<]+)</span>', re.DOTALL).findall(data)
|
||||
matches = re.compile(r'<iframe.*?src="([^"]+trembed=[^"]+)', re.DOTALL).findall(data)
|
||||
for i, scrapedurl in enumerate(matches):
|
||||
|
||||
scrapedurl = scrapertoolsV2.decodeHtmlentities(scrapedurl)
|
||||
patron = r'<iframe.*?src="([^"]+)"'
|
||||
link_data = httptools.downloadpage(scrapedurl).data
|
||||
url = scrapertoolsV2.find_single_match(link_data, patron)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="play",
|
||||
contentType=item.contentType,
|
||||
title="[B]" + btns[i][0] + "[/B] - " + btns[i][1],
|
||||
fulltitle=btns[i][0] + " " + btns[i][1],
|
||||
show=btns[i][0] + " " + btns[i][1],
|
||||
url=url,
|
||||
extra=item.extra,
|
||||
infoLabels=item.infoLabels,
|
||||
server=btns[i][0],
|
||||
contentQuality=btns[i][1].replace('Italiano - ', ''),
|
||||
))
|
||||
|
||||
if item.contentType == "movie":
|
||||
support.videolibrary(itemlist, item)
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
|
||||
35
channels/cinetecadibologna.json
Normal file → Executable file
35
channels/cinetecadibologna.json
Normal file → Executable file
@@ -1,36 +1,11 @@
|
||||
{
|
||||
"id": "cinetecadibologna",
|
||||
"name": "Cinetecadibologna",
|
||||
"name": "Cineteca di Bologna",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
|
||||
"banner": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
|
||||
"thumbnail": "cinetecadibologna.png",
|
||||
"banner": "cinetecadibologna.png",
|
||||
"categories": ["documentary"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_documentales",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Documentari",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_italiano",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Italiano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
"not_active":["include_in_newest_peliculas", "include_in_newest_series", "include_in_newest_anime", "include_in_global_search"],
|
||||
"settings": []
|
||||
}
|
||||
|
||||
189
channels/cinetecadibologna.py
Normal file → Executable file
189
channels/cinetecadibologna.py
Normal file → Executable file
@@ -1,155 +1,74 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Ringraziamo Icarus crew
|
||||
# Canale per cinetecadibologna
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "http://cinestore.cinetecadibologna.it"
|
||||
from core import support
|
||||
|
||||
host = support.config.get_channel_url()
|
||||
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
@support.menu
|
||||
def mainlist(item):
|
||||
logger.info("kod.cinetecadibologna mainlist")
|
||||
itemlist = [Item(channel=item.channel,
|
||||
title="[COLOR azure]Elenco Film - Cineteca di Bologna[/COLOR]",
|
||||
action="peliculas",
|
||||
url="%s/video/alfabetico_completo" % host,
|
||||
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Epoche - Cineteca di Bologna[/COLOR]",
|
||||
action="epoche",
|
||||
url="%s/video/epoche" % host,
|
||||
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Percorsi Tematici - Cineteca di Bologna[/COLOR]",
|
||||
action="percorsi",
|
||||
url="%s/video/percorsi" % host,
|
||||
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif")]
|
||||
|
||||
return itemlist
|
||||
film = ['/video/alfabetico_completo',
|
||||
('Anni',['/video/epoche', 'menu']),
|
||||
('Registi',['/video/registi', 'menu']),
|
||||
('Attori',['/video/attori', 'menu']),
|
||||
('Percorsi Tematici',['/video/percorsi','menu'])]
|
||||
return locals()
|
||||
|
||||
|
||||
@support.scrape
|
||||
def menu(item):
|
||||
action = 'peliculas'
|
||||
if 'epoche' in item.url:
|
||||
patronMenu =r'<li>\s*<a href="(?P<url>[^"]+)">(?P<title>[^>]+)<'
|
||||
elif 'percorsi' in item.url:
|
||||
patron = r'<div class="cover_percorso">\s*<a href="(?P<url>[^"]+)">\s*<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>(?P<title>.*?)<'
|
||||
else:
|
||||
patron = r'<h2>\s*<a href="(?P<url>[^,"]+),[^"]+"\s*>(?P<title>[^<]+)<'
|
||||
patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc'
|
||||
return locals()
|
||||
|
||||
|
||||
def search(item, text):
|
||||
support.info(text)
|
||||
item.args = 'noorder'
|
||||
item.url = host + '/ricerca/type_ALL/ricerca_' + text
|
||||
item.contentType = 'movie'
|
||||
try:
|
||||
return peliculas(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
support.logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
|
||||
@support.scrape
|
||||
def peliculas(item):
|
||||
logger.info("kod.cinetecadibologna peliculas")
|
||||
itemlist = []
|
||||
if 'alfabetico' in item.url:
|
||||
patron = r'<img src="(?P<thumb>[^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?:\[)?(?P<title>[^\]<]+)(?:\]|<)'
|
||||
else:
|
||||
if 'type_ALL' in item.url: patronBlock = r'Video:(?P<block>.*?)(?:<div class=""|<!--)'
|
||||
elif not 'NomePersona' in item.url: patronBlock = r'<h3>Film</h3>(?P<block>.*?)<div class="list_wrapper'
|
||||
patron = r'<a href="(?P<url>[^"]+)"\s*class="[^"]+"\s*title="(?:\[)?(?P<title>[^\]"]+)(?:\])?"\s*rel="(?P<thumb>[^"]+)"'
|
||||
patronNext = r'<div class="dx">\s*<a href="(.*?)">pagina suc'
|
||||
return locals()
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<img src="([^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="([^"]+)"[^>]+>(.*?)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedthumbnail = host + scrapedthumbnail
|
||||
scrapedurl = host + scrapedurl
|
||||
if not "/video/" in scrapedurl:
|
||||
continue
|
||||
html = scrapertools.cache_page(scrapedurl)
|
||||
start = html.find("Sinossi:")
|
||||
end = html.find('<div class="sx_col">', start)
|
||||
scrapedplot = html[start:end]
|
||||
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
|
||||
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
|
||||
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
# Paginazione
|
||||
patronvideos = '<div class="footerList clearfix">\s*<div class="sx">\s*[^>]+>[^g]+gina[^>]+>\s*[^>]+>\s*<div class="dx">\s*<a href="(.*?)">pagina suc'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
if len(matches) > 0:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url= scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
def epoche(item):
|
||||
logger.info("kod.cinetecadibologna categorias")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Narrow search by selecting only the combo
|
||||
bloque = scrapertools.find_single_match(data, '<h1 class="pagetitle">Epoche</h1>(.*?)</ul>')
|
||||
|
||||
# The categories are the options for the combo
|
||||
patron = '<a href="([^"]+)">(.*?)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedurl = host + scrapedurl
|
||||
scrapedplot = ""
|
||||
if scrapedtitle.startswith(("'")):
|
||||
scrapedtitle = scrapedtitle.replace("'", "Anni '")
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://www.cinetecadibologna.it/pics/cinema-ritrovato-alcinema.png",
|
||||
plot=scrapedplot))
|
||||
|
||||
return itemlist
|
||||
|
||||
def percorsi(item):
|
||||
logger.info("kod.cinetecadibologna categorias")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
patron = '<div class="cover_percorso">\s*<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>\s*[^>]+>(.*?)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
scrapedurl = host + scrapedurl
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = host + scrapedthumbnail
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas",
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot))
|
||||
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("kod.cinetecadibologna findvideos")
|
||||
support.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
matches = support.match(item, patron=r'filename: "(.*?)"').matches
|
||||
|
||||
patron = 'filename: "(.*?)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
for url in matches:
|
||||
itemlist.append(item.clone(action="play", title=support.config.get_localized_string(30137), server='directo', url=host + url))
|
||||
|
||||
for video in matches:
|
||||
video = host + video
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="play",
|
||||
title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
|
||||
url=video,
|
||||
folder=False))
|
||||
|
||||
return itemlist
|
||||
return support.server(item, itemlist=itemlist)
|
||||
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"id": "cinetemagay",
|
||||
"name": "Cinetemagay",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "cinetemagay.png",
|
||||
"banner": "cinetemagay.png",
|
||||
"categories": [
|
||||
"adult"
|
||||
]
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from core import httptools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay')
|
||||
|
||||
|
||||
def strip_tags(value):
|
||||
return re.sub(r'<[^>]*?>', '', value)
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay latinoamericano",
|
||||
url="http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
|
||||
thumbnail="http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg"))
|
||||
itemlist.append(Item(channel=item.channel, action="lista", title="Cine y cortos gay",
|
||||
url="http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
|
||||
thumbnail="http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg"))
|
||||
itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay online (México)",
|
||||
url="http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
|
||||
thumbnail="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ"))
|
||||
itemlist.append(Item(channel=item.channel, action="lista", title="Sentido gay",
|
||||
url="http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1",
|
||||
thumbnail="http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg"))
|
||||
itemlist.append(Item(channel=item.channel, action="lista", title="PGPA",
|
||||
url="http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
|
||||
thumbnail="http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Extrae las entradas (carpetas)
|
||||
patronvideos = '<img .*?src="(.*?)"'
|
||||
patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>"
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
for match in matches:
|
||||
scrapedtitle = match[3]
|
||||
scrapedtitle = scrapedtitle.replace("'", "'")
|
||||
scrapedtitle = scrapedtitle.replace(""", "'")
|
||||
scrapedtitle = scrapedtitle.replace("&amp;", "'")
|
||||
scrapedtitle = scrapedtitle.replace("&#39;", "'")
|
||||
scrapedurl = match[2]
|
||||
scrapedthumbnail = match[0]
|
||||
imagen = ""
|
||||
scrapedplot = match[1]
|
||||
tipo = match[1]
|
||||
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
|
||||
scrapedplot = "<" + scrapedplot
|
||||
scrapedplot = scrapedplot.replace(">", ">")
|
||||
scrapedplot = scrapedplot.replace("<", "<")
|
||||
scrapedplot = scrapedplot.replace("</div>", "\n")
|
||||
scrapedplot = scrapedplot.replace("<br />", "\n")
|
||||
scrapedplot = scrapedplot.replace("&", "")
|
||||
scrapedplot = scrapedplot.replace("nbsp;", "")
|
||||
scrapedplot = strip_tags(scrapedplot)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
plot=scrapedurl + scrapedplot, folder=True))
|
||||
|
||||
variable = item.url.split("index=")[1]
|
||||
variable = int(variable)
|
||||
variable += 100
|
||||
variable = str(variable)
|
||||
variable_url = item.url.split("index=")[0]
|
||||
url_nueva = variable_url + "index=" + variable
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")",
|
||||
url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def detail(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Descarga la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
data = data.replace("%3A", ":")
|
||||
data = data.replace("%2F", "/")
|
||||
data = data.replace("%3D", "=")
|
||||
data = data.replace("%3", "?")
|
||||
data = data.replace("%26", "&")
|
||||
descripcion = ""
|
||||
plot = ""
|
||||
patrondescrip = 'SINOPSIS:(.*?)'
|
||||
matches = re.compile(patrondescrip, re.DOTALL).findall(data)
|
||||
if len(matches) > 0:
|
||||
descripcion = matches[0]
|
||||
descripcion = descripcion.replace(" ", "")
|
||||
descripcion = descripcion.replace("<br/>", "")
|
||||
descripcion = descripcion.replace("\r", "")
|
||||
descripcion = descripcion.replace("\n", " ")
|
||||
descripcion = descripcion.replace("\t", " ")
|
||||
descripcion = re.sub("<[^>]+>", " ", descripcion)
|
||||
descripcion = descripcion
|
||||
try:
|
||||
plot = unicode(descripcion, "utf-8").encode("iso-8859-1")
|
||||
except:
|
||||
plot = descripcion
|
||||
|
||||
# Busca los enlaces a los videos de servidores
|
||||
video_itemlist = servertools.find_video_items(data=data)
|
||||
for video_item in video_itemlist:
|
||||
itemlist.append(Item(channel=item.channel, action="play", server=video_item.server,
|
||||
title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail,
|
||||
plot=video_item.url, folder=False))
|
||||
|
||||
return itemlist
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "cliphunter",
|
||||
"name": "cliphunter",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.cliphunter.com/gfx/new/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host = 'https://www.cliphunter.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/categories/All"))
|
||||
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular/ratings/yesterday"))
|
||||
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host + "/search/%s" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def catalogo(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
|
||||
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
|
||||
url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorias(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapedtitle
|
||||
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
patron = '<img class=".*?" src="([^"]+)".*?<div class="tr">(.*?)</div>.*?<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedthumbnail,scrapedtime,scrapedurl,scrapedtitle in matches:
|
||||
url = urlparse.urljoin(item.url,scrapedurl)
|
||||
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
|
||||
thumbnail = scrapedthumbnail
|
||||
plot = ""
|
||||
year = ""
|
||||
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
|
||||
fanart=thumbnail, contentTitle = title, infoLabels={'year':year} ))
|
||||
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">»</a>')
|
||||
if next_page!="":
|
||||
next_page = urlparse.urljoin(item.url,next_page)
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '"url"\:"(.*?)"'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl in matches:
|
||||
scrapedurl = scrapedurl.replace("\/", "/")
|
||||
title = scrapedurl
|
||||
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
|
||||
return itemlist
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "coomelonitas",
|
||||
"name": "Coomelonitas",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "http://www.coomelonitas.com/wp-content/themes/3xTheme/images/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
|
||||
]
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger
|
||||
from platformcode import config
|
||||
|
||||
host ='http://www.coomelonitas.com'
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
|
||||
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = host+ "/?s=%s" % texto
|
||||
try:
|
||||
return lista(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def categorias(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for scrapedurl,scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail, plot=scrapedplot) )
|
||||
return itemlist
|
||||
|
||||
|
||||
def lista(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<div class="all"(.*?)</div>'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data)
|
||||
for match in matches:
|
||||
title = scrapertools.find_single_match(match,'title="([^"]+)"')
|
||||
url = scrapertools.find_single_match(match,'<a href="([^"]+)"')
|
||||
plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
|
||||
thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
|
||||
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
|
||||
fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie") )
|
||||
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
|
||||
if next_page!="":
|
||||
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
|
||||
return itemlist
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"id": "cumlouder",
|
||||
"name": "Cumlouder",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "cumlouder.png",
|
||||
"banner": "cumlouder.png",
|
||||
"categories": [
|
||||
"adult"
|
||||
]
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user