-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathcreate-external-cluster-resources.py
2098 lines (1954 loc) · 92.1 KB
/
create-external-cluster-resources.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import errno
import sys
import json
import argparse
import re
import subprocess
import hmac
# SHA-1 is required by the AWS v2 signing scheme used by S3Auth below.
from hashlib import sha1 as sha
from os import linesep as LINESEP
from os import path
# formatdate() produces the RFC 1123 "Date" header for S3 request signing.
from email.utils import formatdate
import requests
from requests.auth import AuthBase

# Python-2/3 compatibility shims: `py3k` gates str/bytes handling in S3Auth.
py3k = False
if sys.version_info.major >= 3:
    py3k = True
    import urllib.parse
    from ipaddress import ip_address, IPv4Address

# Python 2 has no ModuleNotFoundError; alias it so the `except
# ModuleNotFoundError` clauses below work on both major versions.
# (On Python 3 this widens the catch to its parent ImportError, which is
# harmless for these import guards.)
ModuleNotFoundError = ImportError

# `rados` and `rbd` are the Ceph python bindings; they are hard requirements,
# so exit immediately with a readable message when they are absent.
try:
    import rados
except ModuleNotFoundError as noModErr:
    print(f"Error: {noModErr}\nExiting the script...")
    sys.exit(1)

try:
    import rbd
except ModuleNotFoundError as noModErr:
    print(f"Error: {noModErr}\nExiting the script...")
    sys.exit(1)

try:
    # for 2.7.x
    from StringIO import StringIO
except ModuleNotFoundError:
    # for 3.x
    from io import StringIO

try:
    # for 2.7.x
    from urlparse import urlparse
    from urllib import urlencode as urlencode
except ModuleNotFoundError:
    # for 3.x
    from urllib.parse import urlparse
    from urllib.parse import urlencode as urlencode

# base64.encodestring was removed in Python 3.9; fall back to encodebytes.
try:
    from base64 import encodestring
except:
    from base64 import encodebytes as encodestring
class ExecutionFailureException(Exception):
    """Raised when a ceph command fails or returns unusable output."""
################################################
################## DummyRados ##################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
    """In-memory stand-in for a `rados.Rados` cluster handle.

    Replays canned JSON responses for the mon commands this script issues,
    so the script can be exercised where 'rados' (or a live cluster) is not
    available. Mainly for testing.
    """

    def __init__(self):
        self.return_val = 0  # return code handed back by mon_command()
        self.err_message = ""  # error text handed back by mon_command()
        self.state = "connected"
        self.cmd_output_map = {}  # canonical-JSON command string -> canned output
        self.cmd_names = {}
        self._init_cmd_output_map()
        self.dummy_host_ip_map = {}  # hostname -> fabricated IP (stable per run)

    def _init_cmd_output_map(self):
        """Populate the canned command -> output table (reads the ceph-status fixture file)."""
        json_file_name = "test-data/ceph-status-out"
        script_dir = path.abspath(path.dirname(__file__))
        ceph_status_str = ""
        with open(
            path.join(script_dir, json_file_name), mode="r", encoding="UTF-8"
        ) as json_file:
            ceph_status_str = json_file.read()
        self.cmd_names["fs ls"] = """{"format": "json", "prefix": "fs ls"}"""
        self.cmd_names["quorum_status"] = (
            """{"format": "json", "prefix": "quorum_status"}"""
        )
        self.cmd_names["mgr services"] = (
            """{"format": "json", "prefix": "mgr services"}"""
        )
        # all the commands and their output
        self.cmd_output_map[self.cmd_names["fs ls"]] = (
            """[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]"""
        )
        self.cmd_output_map[self.cmd_names["quorum_status"]] = (
            """{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}"""
        )
        self.cmd_output_map[self.cmd_names["mgr services"]] = (
            """{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}"""
        )
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command quorum_status", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "profile rbd, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd, allow command 'osd blocklist'","osd":"profile rbd"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs *=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner-openshift-storage-myfs","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r, allow command 'osd blocklist'","osd":"allow rw tag cephfs metadata=myfs"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSRKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map["""{"format": "json", "prefix": "mgr services"}"""] = (
            """{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}"""
        )
        # NOTE: earlier revisions assigned this key twice (the first value was
        # a dead, immediately-overwritten 'mgr services' payload); only the
        # final value below is observable, so the dead assignment was removed.
        self.cmd_output_map[
            """{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}"""
        ] = """[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "profile rbd-read-only, allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner-openshift-storage-myfs", "format": "json", "prefix": "auth get"}"""
        ] = """[]"""
        # NOTE: this key was also previously assigned twice ("""[]""" first,
        # then the entry below); the dead first assignment was removed.
        self.cmd_output_map[
            """{"entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map[
            """{"caps": ["mon", "allow r, allow command 'osd blocklist'", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth caps"}"""
        ] = """[{"entity":"client.csi-cephfs-provisioner","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command 'osd blocklist'", "mgr":"allow rw", "osd":"allow rw tag cephfs metadata=*"}}]"""
        self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str

    def shutdown(self):
        pass

    def get_fsid(self):
        # static fsid matching the canned quorum_status payload above
        return "af4e1673-0b72-402d-990a-22d2919d0f1c"

    def conf_read_file(self):
        pass

    def connect(self):
        pass

    def pool_exists(self, pool_name):
        return True

    def mon_command(self, cmd, out):
        """Mimic rados mon_command(): look up the canned output for `cmd`.

        The command JSON is re-serialized with sorted keys so lookups are
        insensitive to the caller's key ordering.
        """
        json_cmd = json.loads(cmd)
        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
        cmd_output = self.cmd_output_map[json_cmd_str]
        return self.return_val, cmd_output, str(self.err_message.encode("utf-8"))

    def _convert_hostname_to_ip(self, host_name):
        """Return the host unchanged if it is an IPv4 literal, else a stable fake IP."""
        # BUGFIX: the dots were previously unescaped (matching any character),
        # so non-IP strings could be misclassified as IPs; escape them.
        ip_reg_x = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
        # if provided host is directly an IP address, return the same
        if ip_reg_x.match(host_name):
            return host_name
        import random

        host_ip = self.dummy_host_ip_map.get(host_name, "")
        if not host_ip:
            host_ip = f"172.9.{random.randint(0, 254)}.{random.randint(0, 254)}"
            self.dummy_host_ip_map[host_name] = host_ip
        del random
        return host_ip

    @classmethod
    def Rados(cls, conffile=None):
        # BUGFIX: the original signature omitted `cls`, so the implicit class
        # argument was bound to `conffile` and calling `Rados(conffile=...)`
        # raised a TypeError. `DummyRados.Rados()` continues to work unchanged.
        return DummyRados()
class S3Auth(AuthBase):
    """Attaches AWS Authentication to the given Request object."""

    # default S3 endpoint; overridden per-instance via `service_url`
    service_base_url = "s3.amazonaws.com"

    def __init__(self, access_key, secret_key, service_url=None):
        if service_url:
            self.service_base_url = service_url
        self.access_key = str(access_key)
        self.secret_key = str(secret_key)

    def __call__(self, r):
        """Sign the outgoing request `r` in place and return it."""
        # Create date header if it is not created yet.
        if "date" not in r.headers and "x-amz-date" not in r.headers:
            r.headers["date"] = formatdate(timeval=None, localtime=False, usegmt=True)
        signature = self.get_signature(r)
        if py3k:
            signature = signature.decode("utf-8")
        r.headers["Authorization"] = f"AWS {self.access_key}:{signature}"
        return r

    def get_signature(self, r):
        """Return the base64-encoded HMAC-SHA1 signature for request `r`."""
        canonical_string = self.get_canonical_string(r.url, r.headers, r.method)
        if py3k:
            key = self.secret_key.encode("utf-8")
            msg = canonical_string.encode("utf-8")
        else:
            key = self.secret_key
            msg = canonical_string
        h = hmac.new(key, msg, digestmod=sha)
        return encodestring(h.digest()).strip()

    def get_canonical_string(self, url, headers, method):
        """Build the AWS v2 canonical string from method, headers and URL."""
        parsedurl = urlparse(url)
        objectkey = parsedurl.path[1:]
        bucket = parsedurl.netloc[: -len(self.service_base_url)]
        if len(bucket) > 1:
            # remove last dot
            bucket = bucket[:-1]
        interesting_headers = {"content-md5": "", "content-type": "", "date": ""}
        for key in headers:
            lk = key.lower()
            try:
                # on Python 2 header keys may be bytes; str keys have no
                # decode() and raise AttributeError (previously a bare except)
                lk = lk.decode("utf-8")
            except (AttributeError, UnicodeDecodeError):
                pass
            if headers[key] and (
                lk in interesting_headers.keys() or lk.startswith("x-amz-")
            ):
                interesting_headers[lk] = headers[key].strip()
        # If x-amz-date is used it supersedes the date header.
        # (both branches of the old py3k if/else were identical - collapsed)
        if "x-amz-date" in interesting_headers:
            interesting_headers["date"] = ""
        buf = f"{method}\n"
        for key in sorted(interesting_headers.keys()):
            val = interesting_headers[key]
            if key.startswith("x-amz-"):
                buf += f"{key}:{val}\n"
            else:
                buf += f"{val}\n"
        # append the bucket if it exists
        if bucket != "":
            buf += f"/{bucket}"
        # add the objectkey. even if it doesn't exist, add the slash
        buf += f"/{objectkey}"
        return buf
class RadosJSON:
    """Collects external Ceph cluster connection details via mon commands
    and emits them in the selected output format (json or bash).
    """

    # default ceph user used for cluster health checks (see --run-as-user)
    EXTERNAL_USER_NAME = "client.healthchecker"
    # user used for RGW admin-ops operations
    EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user"
    # error text used when a mon command returns an empty result
    EMPTY_OUTPUT_LIST = "Empty output list"
    # fallback when --rgw-pool-prefix is not supplied
    DEFAULT_RGW_POOL_PREFIX = "default"
    # fallback when --monitoring-endpoint-port is not supplied
    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
argP = argparse.ArgumentParser()
common_group = argP.add_argument_group("common")
common_group.add_argument("--verbose", "-v", action="store_true", default=False)
common_group.add_argument(
"--ceph-conf", "-c", help="Provide a ceph conf file.", type=str
)
common_group.add_argument(
"--keyring", "-k", help="Path to ceph keyring file.", type=str
)
common_group.add_argument(
"--run-as-user",
"-u",
default="",
type=str,
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'",
)
common_group.add_argument(
"--cluster-name",
default="",
help="Kubernetes cluster name(legacy flag), Note: Either use this or --k8s-cluster-name",
)
common_group.add_argument(
"--k8s-cluster-name", default="", help="Kubernetes cluster name"
)
common_group.add_argument(
"--namespace",
default="",
help="Namespace where CephCluster is running",
)
common_group.add_argument(
"--rgw-pool-prefix", default="", help="RGW Pool prefix"
)
common_group.add_argument(
"--restricted-auth-permission",
default=False,
help="Restrict cephCSIKeyrings auth permissions to specific pools, cluster."
+ "Mandatory flags that need to be set are --rbd-data-pool-name, and --k8s-cluster-name."
+ "--cephfs-filesystem-name flag can also be passed in case of cephfs user restriction, so it can restrict user to particular cephfs filesystem"
+ "sample run: `python3 /etc/ceph/create-external-cluster-resources.py --cephfs-filesystem-name myfs --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --restricted-auth-permission true`"
+ "Note: Restricting the csi-users per pool, and per cluster will require creating new csi-users and new secrets for that csi-users."
+ "So apply these secrets only to new `Consumer cluster` deployment while using the same `Source cluster`.",
)
common_group.add_argument(
"--v2-port-enable",
action="store_true",
default=False,
help="Enable v2 mon port(3300) for mons",
)
output_group = argP.add_argument_group("output")
output_group.add_argument(
"--format",
"-t",
choices=["json", "bash"],
default="json",
help="Provides the output format (json | bash)",
)
output_group.add_argument(
"--output",
"-o",
default="",
help="Output will be stored into the provided file",
)
output_group.add_argument(
"--cephfs-filesystem-name",
default="",
help="Provides the name of the Ceph filesystem",
)
output_group.add_argument(
"--cephfs-metadata-pool-name",
default="",
help="Provides the name of the cephfs metadata pool",
)
output_group.add_argument(
"--cephfs-data-pool-name",
default="",
help="Provides the name of the cephfs data pool",
)
output_group.add_argument(
"--rbd-data-pool-name",
default="",
required=False,
help="Provides the name of the RBD datapool",
)
output_group.add_argument(
"--alias-rbd-data-pool-name",
default="",
required=False,
help="Provides an alias for the RBD data pool name, necessary if a special character is present in the pool name such as a period or underscore",
)
output_group.add_argument(
"--rgw-endpoint",
default="",
required=False,
help="RADOS Gateway endpoint (in `<IPv4>:<PORT>` or `<[IPv6]>:<PORT>` or `<FQDN>:<PORT>` format)",
)
output_group.add_argument(
"--rgw-tls-cert-path",
default="",
required=False,
help="RADOS Gateway endpoint TLS certificate",
)
output_group.add_argument(
"--rgw-skip-tls",
required=False,
default=False,
help="Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED",
)
output_group.add_argument(
"--monitoring-endpoint",
default="",
required=False,
help="Ceph Manager prometheus exporter endpoints (comma separated list of (format `<IPv4>` or `<[IPv6]>` or `<FQDN>`) entries of active and standby mgrs)",
)
output_group.add_argument(
"--monitoring-endpoint-port",
default="",
required=False,
help="Ceph Manager prometheus exporter port",
)
output_group.add_argument(
"--skip-monitoring-endpoint",
default=False,
action="store_true",
help="Do not check for a monitoring endpoint for the Ceph cluster",
)
output_group.add_argument(
"--rbd-metadata-ec-pool-name",
default="",
required=False,
help="Provides the name of erasure coded RBD metadata pool",
)
output_group.add_argument(
"--dry-run",
default=False,
action="store_true",
help="Dry run prints the executed commands without running them",
)
output_group.add_argument(
"--rados-namespace",
default="",
required=False,
help="Divides a pool into separate logical namespaces, used for creating RBD PVC in a CephBlockPoolRadosNamespace (should be lower case)",
)
output_group.add_argument(
"--subvolume-group",
default="",
required=False,
help="provides the name of the subvolume group",
)
output_group.add_argument(
"--rgw-realm-name",
default="",
required=False,
help="provides the name of the rgw-realm",
)
output_group.add_argument(
"--rgw-zone-name",
default="",
required=False,
help="provides the name of the rgw-zone",
)
output_group.add_argument(
"--rgw-zonegroup-name",
default="",
required=False,
help="provides the name of the rgw-zonegroup",
)
output_group.add_argument(
"--topology-pools",
default="",
required=False,
help="comma-separated list of topology-constrained rbd pools",
)
output_group.add_argument(
"--topology-failure-domain-label",
default="",
required=False,
help="k8s cluster failure domain label (example: zone, rack, or host) for the topology-pools that match the ceph domain",
)
output_group.add_argument(
"--topology-failure-domain-values",
default="",
required=False,
help="comma-separated list of the k8s cluster failure domain values corresponding to each of the pools in the `topology-pools` list",
)
upgrade_group = argP.add_argument_group("upgrade")
upgrade_group.add_argument(
"--upgrade",
action="store_true",
default=False,
help="Upgrades the cephCSIKeyrings(For example: client.csi-cephfs-provisioner) and client.healthchecker ceph users with new permissions needed for the new cluster version and older permission will still be applied."
+ "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade`, this will upgrade all the default csi users(non-restricted)"
+ "For restricted users(For example: client.csi-cephfs-provisioner-openshift-storage-myfs), users created using --restricted-auth-permission flag need to pass mandatory flags"
+ "mandatory flags: '--rbd-data-pool-name, --k8s-cluster-name and --run-as-user' flags while upgrading"
+ "in case of cephfs users if you have passed --cephfs-filesystem-name flag while creating user then while upgrading it will be mandatory too"
+ "Sample run: `python3 /etc/ceph/create-external-cluster-resources.py --upgrade --rbd-data-pool-name replicapool --k8s-cluster-name rookstorage --run-as-user client.csi-rbd-node-rookstorage-replicapool`"
+ "PS: An existing non-restricted user cannot be converted to a restricted user by upgrading."
+ "Upgrade flag should only be used to append new permissions to users, it shouldn't be used for changing user already applied permission, for example you shouldn't change in which pool user has access",
)
if args_to_parse:
assert (
type(args_to_parse) == list
), "Argument to 'gen_arg_parser' should be a list"
else:
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)
def validate_rbd_metadata_ec_pool_name(self):
if self._arg_parser.rbd_metadata_ec_pool_name:
rbd_metadata_ec_pool_name = self._arg_parser.rbd_metadata_ec_pool_name
rbd_pool_name = self._arg_parser.rbd_data_pool_name
if rbd_pool_name == "":
raise ExecutionFailureException(
"Flag '--rbd-data-pool-name' should not be empty"
)
if rbd_metadata_ec_pool_name == "":
raise ExecutionFailureException(
"Flag '--rbd-metadata-ec-pool-name' should not be empty"
)
cmd_json = {"prefix": "osd dump", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
f"{cmd_json['prefix']} command failed.\n"
f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
)
metadata_pool_exist, pool_exist = False, False
for key in json_out["pools"]:
# if erasure_code_profile is empty and pool name exists then it replica pool
if (
key["erasure_code_profile"] == ""
and key["pool_name"] == rbd_metadata_ec_pool_name
):
metadata_pool_exist = True
# if erasure_code_profile is not empty and pool name exists then it is ec pool
if key["erasure_code_profile"] and key["pool_name"] == rbd_pool_name:
pool_exist = True
if not metadata_pool_exist:
raise ExecutionFailureException(
"Provided rbd_ec_metadata_pool name,"
f" {rbd_metadata_ec_pool_name}, does not exist"
)
if not pool_exist:
raise ExecutionFailureException(
f"Provided rbd_data_pool name, {rbd_pool_name}, does not exist"
)
return rbd_metadata_ec_pool_name
def dry_run(self, msg):
if self._arg_parser.dry_run:
print("Execute: " + "'" + msg + "'")
def validate_rgw_endpoint_tls_cert(self):
if self._arg_parser.rgw_tls_cert_path:
with open(self._arg_parser.rgw_tls_cert_path, encoding="utf8") as f:
contents = f.read()
return contents.rstrip()
def _check_conflicting_options(self):
if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified"
)
def _invalid_endpoint(self, endpoint_str):
# extract the port by getting the last split on `:` delimiter
try:
endpoint_str_ip, port = endpoint_str.rsplit(":", 1)
except ValueError:
raise ExecutionFailureException(f"Not a proper endpoint: {endpoint_str}")
try:
if endpoint_str_ip[0] == "[":
endpoint_str_ip = endpoint_str_ip[1 : len(endpoint_str_ip) - 1]
ip_type = (
"IPv4" if type(ip_address(endpoint_str_ip)) is IPv4Address else "IPv6"
)
except ValueError:
ip_type = "FQDN"
if not port.isdigit():
raise ExecutionFailureException(f"Port not valid: {port}")
intPort = int(port)
if intPort < 1 or intPort > 2**16 - 1:
raise ExecutionFailureException(f"Out of range port number: {port}")
return ip_type
def endpoint_dial(self, endpoint_str, ip_type, timeout=3, cert=None):
    """Probe `endpoint_str` over http and https with HEAD requests.

    Returns (protocol, verify, "") for the first scheme answering 200,
    or ("", "", "-1") after writing the last error to stderr.
    """
    # if the 'cluster' instance is a dummy one,
    # don't try to reach out to the endpoint
    if isinstance(self.cluster, DummyRados):
        return "", "", ""
    # bracket a bare IPv6 address so it forms a valid URL host part
    if ip_type == "IPv6":
        try:
            host_part, port_part = endpoint_str.rsplit(":", 1)
        except ValueError:
            raise ExecutionFailureException(
                f"Not a proper endpoint: {endpoint_str}"
            )
        if host_part[0] != "[":
            host_part = "[" + host_part + "]"
        endpoint_str = ":".join([host_part, port_part])
    response_error = None
    for protocol in ("http", "https"):
        ep = f"{protocol}://{endpoint_str}"
        # If verify is set to a path to a directory,
        # the directory must have been processed using the c_rehash utility
        # supplied with OpenSSL.
        verify = None
        if protocol == "https" and self._arg_parser.rgw_skip_tls:
            verify = False
        elif protocol == "https" and cert:
            verify = cert
        try:
            if verify is None:
                r = requests.head(ep, timeout=timeout)
            else:
                r = requests.head(ep, timeout=timeout, verify=verify)
            if r.status_code == 200:
                return protocol, verify, ""
        except Exception as err:
            response_error = err
            continue
    sys.stderr.write(
        f"unable to connect to endpoint: {endpoint_str}, failed error: {response_error}"
    )
    return (
        "",
        "",
        ("-1"),
    )
def __init__(self, arg_list=None):
    """Parse CLI options, apply defaults, and connect to the Ceph cluster."""
    self.out_map = {}
    self._excluded_keys = set()
    self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
    self._check_conflicting_options()
    parsed = self._arg_parser
    self.run_as_user = parsed.run_as_user
    self.output_file = parsed.output
    self.ceph_conf = parsed.ceph_conf
    self.ceph_keyring = parsed.keyring
    # if user not provided, give a default user (skipped for upgrade runs)
    if not self.run_as_user and not parsed.upgrade:
        self.run_as_user = self.EXTERNAL_USER_NAME
    # likewise default the RGW pool prefix
    if not parsed.rgw_pool_prefix and not parsed.upgrade:
        parsed.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
    # open the cluster handle: either from an explicit conf file (optionally
    # with a keyring) or from the default environment configuration
    if self.ceph_conf:
        conn_kwargs = {}
        if self.ceph_keyring:
            conn_kwargs["conf"] = {"keyring": self.ceph_keyring}
        self.cluster = rados.Rados(conffile=self.ceph_conf, **conn_kwargs)
    else:
        self.cluster = rados.Rados()
        self.cluster.conf_read_file()
    self.cluster.connect()
def shutdown(self):
if self.cluster.state == "connected":
self.cluster.shutdown()
def get_fsid(self):
if self._arg_parser.dry_run:
return self.dry_run("ceph fsid")
return str(self.cluster.get_fsid())
def _common_cmd_json_gen(self, cmd_json):
cmd = json.dumps(cmd_json, sort_keys=True)
ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b"")
if self._arg_parser.verbose:
print(f"Command Input: {cmd}")
print(
f"Return Val: {ret_val}\nCommand Output: {cmd_out}\n"
f"Error Message: {err_msg}\n----------\n"
)
json_out = {}
# if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty
# then convert 'cmd_out' to a json output
if ret_val == 0 and cmd_out:
json_out = json.loads(cmd_out)
return ret_val, json_out, err_msg
def get_ceph_external_mon_data(self):
cmd_json = {"prefix": "quorum_status", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json["prefix"])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'quorum_status' command failed.\n"
f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
)
q_leader_name = json_out["quorum_leader_name"]
q_leader_details = {}
q_leader_matching_list = [
l for l in json_out["monmap"]["mons"] if l["name"] == q_leader_name
]
if len(q_leader_matching_list) == 0:
raise ExecutionFailureException("No matching 'mon' details found")
q_leader_details = q_leader_matching_list[0]
# get the address vector of the quorum-leader
q_leader_addrvec = q_leader_details.get("public_addrs", {}).get("addrvec", [])
ip_addr = str(q_leader_details["public_addr"].split("/")[0])
if self._arg_parser.v2_port_enable:
if q_leader_addrvec[0]["type"] == "v2":
ip_addr = q_leader_addrvec[0]["addr"]
elif len(q_leader_addrvec) > 1 and q_leader_addrvec[1]["type"] == "v2":
ip_addr = q_leader_addrvec[1]["addr"]
else:
sys.stderr.write(
"'v2' address type not present, and 'v2-port-enable' flag is provided"
)
return f"{str(q_leader_name)}={ip_addr}"
def _convert_hostname_to_ip(self, host_name, port, ip_type):
# if 'cluster' instance is a dummy type,
# call the dummy instance's "convert" method
if not host_name:
raise ExecutionFailureException("Empty hostname provided")
if isinstance(self.cluster, DummyRados):
return self.cluster._convert_hostname_to_ip(host_name)
if ip_type == "FQDN":
# check which ip FQDN should be converted to, IPv4 or IPv6
# check the host ip, the endpoint ip type would be similar to host ip
cmd_json = {"prefix": "orch host ls", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'orch host ls' command failed.\n"
f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
)
host_addr = json_out[0]["addr"]
# add :80 sample port in ip_type, as _invalid_endpoint also verify port
host_ip_type = self._invalid_endpoint(host_addr + ":80")
import socket
# example output [(<AddressFamily.AF_INET: 2>, <SocketKind.SOCK_STREAM: 1>, 6, '', ('93.184.216.34', 80)), ...]
# we need to get 93.184.216.34 so it would be ip[0][4][0]
if host_ip_type == "IPv6":
ip = socket.getaddrinfo(
host_name, port, family=socket.AF_INET6, proto=socket.IPPROTO_TCP
)
elif host_ip_type == "IPv4":
ip = socket.getaddrinfo(
host_name, port, family=socket.AF_INET, proto=socket.IPPROTO_TCP
)
del socket
return ip[0][4][0]
return host_name
def get_active_and_standby_mgrs(self):
if self._arg_parser.dry_run:
return "", self.dry_run("ceph status")
monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
standby_mgrs = []
if not monitoring_endpoint_ip_list:
cmd_json = {"prefix": "status", "format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'mgr services' command failed.\n"
f"Error: {err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST}"
)
monitoring_endpoint = (
json_out.get("mgrmap", {}).get("services", {}).get("prometheus", "")
)
if not monitoring_endpoint:
raise ExecutionFailureException(
"can't find monitoring_endpoint, prometheus module might not be enabled, "
"enable the module by running 'ceph mgr module enable prometheus'"
)
# now check the stand-by mgr-s
standby_arr = json_out.get("mgrmap", {}).get("standbys", [])
for each_standby in standby_arr:
if "name" in each_standby.keys():
standby_mgrs.append(each_standby["name"])
try:
parsed_endpoint = urlparse(monitoring_endpoint)
except ValueError:
raise ExecutionFailureException(
f"invalid endpoint: {monitoring_endpoint}"
)
monitoring_endpoint_ip_list = parsed_endpoint.hostname
if not monitoring_endpoint_port:
monitoring_endpoint_port = str(parsed_endpoint.port)
# if monitoring endpoint port is not set, put a default mon port
if not monitoring_endpoint_port:
monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT
# user could give comma and space separated inputs (like --monitoring-endpoint="<ip1>, <ip2>")
monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace(",", " ")
monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split()
# if monitoring-endpoint could not be found, raise an error
if len(monitoring_endpoint_ip_list_split) == 0:
raise ExecutionFailureException("No 'monitoring-endpoint' found")
# first ip is treated as the main monitoring-endpoint
monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0]
# rest of the ip-s are added to the 'standby_mgrs' list
standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:])
failed_ip = monitoring_endpoint_ip
monitoring_endpoint = ":".join(
[monitoring_endpoint_ip, monitoring_endpoint_port]
)
ip_type = self._invalid_endpoint(monitoring_endpoint)
try:
monitoring_endpoint_ip = self._convert_hostname_to_ip(
monitoring_endpoint_ip, monitoring_endpoint_port, ip_type
)
# collect all the 'stand-by' mgr ips
mgr_ips = []
for each_standby_mgr in standby_mgrs:
failed_ip = each_standby_mgr
mgr_ips.append(
self._convert_hostname_to_ip(
each_standby_mgr, monitoring_endpoint_port, ip_type
)
)
except:
raise ExecutionFailureException(
f"Conversion of host: {failed_ip} to IP failed. "
"Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag"
)
_, _, err = self.endpoint_dial(monitoring_endpoint, ip_type)
if err == "-1":
raise ExecutionFailureException(err)
# add the validated active mgr IP into the first index
mgr_ips.insert(0, monitoring_endpoint_ip)
all_mgr_ips_str = ",".join(mgr_ips)
return all_mgr_ips_str, monitoring_endpoint_port
def check_user_exist(self, user):
cmd_json = {"prefix": "auth get", "entity": f"{user}", "format": "json"}
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
if ret_val != 0 or len(json_out) == 0:
return ""
return str(json_out[0]["key"])
def get_cephfs_provisioner_caps_and_entity(self):
entity = "client.csi-cephfs-provisioner"
caps = {
"mon": "allow r, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "allow rw tag cephfs metadata=*",
}
if self._arg_parser.restricted_auth_permission:
k8s_cluster_name = self._arg_parser.k8s_cluster_name
if k8s_cluster_name == "":
raise ExecutionFailureException(
"k8s_cluster_name not found, please set the '--k8s-cluster-name' flag"
)
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
if cephfs_filesystem == "":
entity = f"{entity}-{k8s_cluster_name}"
else:
entity = f"{entity}-{k8s_cluster_name}-{cephfs_filesystem}"
caps["osd"] = f"allow rw tag cephfs metadata={cephfs_filesystem}"
return caps, entity
def get_cephfs_node_caps_and_entity(self):
entity = "client.csi-cephfs-node"
caps = {
"mon": "allow r, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "allow rw tag cephfs *=*",
"mds": "allow rw",
}
if self._arg_parser.restricted_auth_permission:
k8s_cluster_name = self._arg_parser.k8s_cluster_name
if k8s_cluster_name == "":
raise ExecutionFailureException(
"k8s_cluster_name not found, please set the '--k8s-cluster-name' flag"
)
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
if cephfs_filesystem == "":
entity = f"{entity}-{k8s_cluster_name}"
else:
entity = f"{entity}-{k8s_cluster_name}-{cephfs_filesystem}"
caps["osd"] = f"allow rw tag cephfs *={cephfs_filesystem}"
return caps, entity
def get_entity(
self,
entity,
rbd_pool_name,
alias_rbd_pool_name,
k8s_cluster_name,
rados_namespace,
):
if (
rbd_pool_name.count(".") != 0
or rbd_pool_name.count("_") != 0
or alias_rbd_pool_name != ""
# checking alias_rbd_pool_name is not empty as there maybe a special character used other than . or _
):
if alias_rbd_pool_name == "":
raise ExecutionFailureException(
"please set the '--alias-rbd-data-pool-name' flag as the rbd data pool name contains '.' or '_'"
)
if (
alias_rbd_pool_name.count(".") != 0
or alias_rbd_pool_name.count("_") != 0
):
raise ExecutionFailureException(
"'--alias-rbd-data-pool-name' flag value should not contain '.' or '_'"
)
entity = f"{entity}-{k8s_cluster_name}-{alias_rbd_pool_name}"
else:
entity = f"{entity}-{k8s_cluster_name}-{rbd_pool_name}"
if rados_namespace:
entity = f"{entity}-{rados_namespace}"
return entity
def get_rbd_provisioner_caps_and_entity(self):
entity = "client.csi-rbd-provisioner"
caps = {
"mon": "profile rbd, allow command 'osd blocklist'",
"mgr": "allow rw",
"osd": "profile rbd",
}
if self._arg_parser.restricted_auth_permission:
rbd_pool_name = self._arg_parser.rbd_data_pool_name
alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
k8s_cluster_name = self._arg_parser.k8s_cluster_name
rados_namespace = self._arg_parser.rados_namespace
if rbd_pool_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--rbd-data-pool-name' flag"
)
if k8s_cluster_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--k8s-cluster-name' flag"
)
entity = self.get_entity(
entity,
rbd_pool_name,
alias_rbd_pool_name,
k8s_cluster_name,
rados_namespace,
)
if rados_namespace != "":
caps["osd"] = (
f"profile rbd pool={rbd_pool_name} namespace={rados_namespace}"
)
else:
caps["osd"] = f"profile rbd pool={rbd_pool_name}"
return caps, entity
def get_rbd_node_caps_and_entity(self):
entity = "client.csi-rbd-node"
caps = {
"mon": "profile rbd, allow command 'osd blocklist'",
"osd": "profile rbd",
}
if self._arg_parser.restricted_auth_permission:
rbd_pool_name = self._arg_parser.rbd_data_pool_name
alias_rbd_pool_name = self._arg_parser.alias_rbd_data_pool_name
k8s_cluster_name = self._arg_parser.k8s_cluster_name
rados_namespace = self._arg_parser.rados_namespace
if rbd_pool_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--rbd-data-pool-name' flag"
)
if k8s_cluster_name == "":
raise ExecutionFailureException(
"mandatory flag not found, please set the '--k8s-cluster-name' flag"
)
entity = self.get_entity(
entity,
rbd_pool_name,
alias_rbd_pool_name,