forked from datacenter/ACI-Pre-Upgrade-Validation-Script
-
Notifications
You must be signed in to change notification settings - Fork 0
/
aci-preupgrade-validation-script.py
2987 lines (2607 loc) · 128 KB
/
aci-preupgrade-validation-script.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2021 Cisco Systems, Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
from six import iteritems
from six.moves import input
from textwrap import TextWrapper
from getpass import getpass
from collections import defaultdict
from datetime import datetime
import warnings
import time
import pexpect
import logging
import subprocess
import json
import sys
import os
import re
# --- Script-wide constants and one-time environment setup ---
SCRIPT_VERSION = "v1.7.0"
# Result labels returned by every check function.
DONE = 'DONE'
PASS = 'PASS'
FAIL_O = 'FAIL - OUTAGE WARNING!!'
FAIL_UF = 'FAIL - UPGRADE FAILURE!!'
ERROR = 'ERROR !!'
MANUAL = 'MANUAL CHECK REQUIRED'
NA = 'N/A'
# Extracts pod/node IDs from an MO dn such as 'topology/pod-1/node-101'.
node_regex = r'topology/pod-(?P<pod>\d+)/node-(?P<node>\d+)'
# Parses ACI version strings in '5.2(1g)' or 'dk9.5.2.1g' style image names.
ver_regex = r'(?:dk9\.)?[1]?(?P<major1>\d)\.(?P<major2>\d)(?:\.|\()(?P<maint>\d+)\.?(?P<patch>(?:[a-b]|[0-9a-z]+))\)?'
tz = time.strftime('%z')  # local UTC offset, e.g. '+0900'
ts = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')  # timestamp for this run
# Output artifacts are grouped under one directory, named per run.
DIR = 'preupgrade_validator_logs/'
BUNDLE_NAME = 'preupgrade_validator_%s%s.tgz' % (ts, tz)
RESULT_FILE = DIR + 'preupgrade_validator_%s%s.txt' % (ts, tz)
JSON_FILE = DIR + 'preupgrade_validator_%s%s.json' % (ts, tz)
LOG_FILE = DIR + 'preupgrade_validator_debug.log'
fmt = '[%(asctime)s.%(msecs)03d{} %(levelname)-8s %(funcName)20s:%(lineno)-4d] %(message)s'.format(tz)
# Create the log directory before logging is configured (mkdir -p is idempotent).
subprocess.check_output(['mkdir', '-p', DIR])
logging.basicConfig(level=logging.DEBUG, filename=LOG_FILE, format=fmt, datefmt='%Y-%m-%d %H:%M:%S')
warnings.simplefilter(action='ignore', category=FutureWarning)
class Connection(object):
    """
    Interactive CLI session handler for Cisco IOS/NXOS devices, built on pexpect.

    Attributes (all optional, set after construction):
        username          login username (default 'admin')
        password          login password (default 'cisco')
        enable_password   enable password, IOS only (default 'cisco')
        protocol          'ssh' or 'telnet' (default 'ssh')
        port              TCP port override (defaults: ssh=22, telnet=23)
        timeout           seconds to wait between each command (default 30)
        prompt            regex expected after each command (default IOS/NXOS prompt)
        log               logfile path or open file object (default None)
        verify            verify/enforce strictHostKey values for SSH (default False)
        searchwindowsize  max bytes of output used when matching expressions;
                          keep low for large outputs (pexpect default None, here 256)
        force_wait        seconds to sleep instead of regex prompt matching, for
                          OSes that ignore searchwindowsize and spin on large
                          outputs (default 0 = disabled; set to ~8 when needed)

    Methods:
        connect()  open the transport with provided protocol/port/hostname
        login()    log into device with provided credentials
        close()    close current connection
        cmd()      execute a command (provide matches and timeout)

    Example using all defaults:
        c = Connection("10.122.140.89")
        c.cmd("terminal length 0")
        c.cmd("show version")
        print("version of code: %s" % c.output)

    @author [email protected]
    @version 07/28/2014
    """

    def __init__(self, hostname):
        self.hostname = hostname
        self.log = None
        self.username = 'admin'
        self.password = 'cisco'
        self.enable_password = 'cisco'
        self.protocol = "ssh"
        self.port = None
        self.timeout = 30
        self.prompt = "[^#]#[ ]*(\x1b[\x5b-\x5f][\x40-\x7e])*[ ]*$"
        self.verify = False
        self.searchwindowsize = 256
        self.force_wait = 0
        self.child = None
        self.output = ""  # output from last command
        self._term_len = 0  # terminal length for cisco devices
        self._login = False  # set to true at first successful login
        self._log = None  # private variable for tracking logfile state

    def __connected(self):
        # determine if a connection is already open
        connected = (self.child is not None and self.child.isatty())
        logging.debug("check for valid connection: %r" % connected)
        return connected

    @property
    def term_len(self):
        return self._term_len

    @term_len.setter
    def term_len(self, term_len):
        self._term_len = int(term_len)
        if (not self.__connected()) or (not self._login):
            # login function will set the terminal length
            self.login()
        else:
            # user changing terminal length during operation, need to explicitly
            self.cmd("terminal length %s" % self._term_len)

    def start_log(self):
        """ start or restart sending output to logfile """
        if self.log is not None and self._log is None:
            # if self.log is a string, then attempt to open file pointer (do not catch exception, we want it
            # to die if there's an error opening the logfile)
            # NOTE: fixed for Python 3 where `unicode` no longer exists;
            # type(u"") is `unicode` on py2 and `str` on py3, so both are covered.
            if isinstance(self.log, str) or isinstance(self.log, type(u"")):
                self._log = open(self.log, "ab")
            else:
                self._log = self.log
            logging.debug("setting logfile to %s" % self._log.name)
            if self.child is not None:
                self.child.logfile = self._log

    def stop_log(self):
        """ stop sending output to logfile """
        self.child.logfile = None
        self._log = None
        return

    def connect(self):
        """Spawn the pexpect child over ssh/telnet (closes any open session first)."""
        # close any currently open connections
        self.close()
        # determine port if not explicitly set
        if self.port is None:
            if self.protocol == "ssh":
                self.port = 22
            if self.protocol == "telnet":
                self.port = 23
        # spawn new thread
        if self.protocol.lower() == "ssh":
            logging.debug(
                "spawning new pexpect connection: ssh %s@%s -p %d" % (self.username, self.hostname, self.port))
            no_verify = " -o StrictHostKeyChecking=no -o LogLevel=ERROR -o UserKnownHostsFile=/dev/null"
            no_verify += " -o HostKeyAlgorithms=+ssh-dss"
            if self.verify: no_verify = ""
            self.child = pexpect.spawn("ssh %s %s@%s -p %d" % (no_verify, self.username, self.hostname, self.port),
                                       searchwindowsize=self.searchwindowsize)
        elif self.protocol.lower() == "telnet":
            logging.info("spawning new pexpect connection: telnet %s %d" % (self.hostname, self.port))
            self.child = pexpect.spawn("telnet %s %d" % (self.hostname, self.port),
                                       searchwindowsize=self.searchwindowsize)
        else:
            logging.error("unknown protocol %s" % self.protocol)
            raise Exception("Unsupported protocol: %s" % self.protocol)
        # start logging
        self.start_log()

    def close(self):
        """Gracefully close the pexpect child if one is open."""
        # try to gracefully close the connection if opened
        if self.__connected():
            logging.info("closing current connection")
            self.child.close()
        self.child = None
        self._login = False

    def __expect(self, matches, timeout=None):
        """
        receives a dictionary 'matches' and returns the name of the matched item
        instead of relying on the index into a list of matches. Automatically
        adds following options if not already present
            "eof"     : pexpect.EOF
            "timeout" : pexpect.TIMEOUT
        """
        if "eof" not in matches:
            matches["eof"] = pexpect.EOF
        if "timeout" not in matches:
            matches["timeout"] = pexpect.TIMEOUT
        if timeout is None: timeout = self.timeout
        indexed = []
        mapping = []
        for i in matches:
            indexed.append(matches[i])
            mapping.append(i)
        result = self.child.expect(indexed, timeout)
        logging.debug("timeout: %d, matched: '%s'\npexpect output: '%s%s'" % (
            timeout, self.child.after, self.child.before, self.child.after))
        # NOTE: fixed off-by-one -- the previous bound (result <= len(mapping))
        # would have allowed an IndexError on mapping[result].
        if 0 <= result < len(mapping):
            logging.debug("expect matched result[%d] = %s" % (result, mapping[result]))
            return mapping[result]
        ds = ''
        logging.error("unexpected pexpect return index: %s" % result)
        for i in range(0, len(mapping)):
            ds += '[%d] %s\n' % (i, mapping[i])
        logging.debug("mapping:\n%s" % ds)
        raise Exception("Unexpected pexpect return index: %s" % result)

    def login(self, max_attempts=7, timeout=17):
        """
        returns true on successful login, else returns false
        """
        logging.debug("Logging into host")
        # successfully logged in at a different time
        if not self.__connected(): self.connect()
        # check for user provided 'prompt' which indicates successful login
        # else provide approriate username/password/enable_password
        matches = {
            "console": "(?i)press return to get started",
            "refuse": "(?i)connection refused",
            "yes/no": "(?i)yes/no",
            "username": "(?i)(user(name)*|login)[ as]*[ \t]*:[ \t]*$",
            "password": "(?i)password[ \t]*:[ \t]*$",
            "enable": ">[ \t]*$",
            "prompt": self.prompt
        }
        last_match = None
        while max_attempts > 0:
            max_attempts -= 1
            match = self.__expect(matches, timeout)
            if match == "console":  # press return to get started
                logging.debug("matched console, send enter")
                self.child.sendline("\r\n")
            elif match == "refuse":  # connection refused
                logging.error("connection refused by host")
                return False
            elif match == "yes/no":  # yes/no for SSH key acceptance
                logging.debug("received yes/no prompt, send yes")
                self.child.sendline("yes")
            elif match == "username":  # username/login prompt
                logging.debug("received username prompt, send username")
                self.child.sendline(self.username)
            elif match == "password":
                # don't log passwords to the logfile
                self.stop_log()
                if last_match == "enable":
                    # if last match was enable prompt, then send enable password
                    logging.debug("matched password prompt, send enable password")
                    self.child.sendline(self.enable_password)
                else:
                    logging.debug("matched password prompt, send password")
                    self.child.sendline(self.password)
                # restart logging
                self.start_log()
            elif match == "enable":
                logging.debug("matched enable prompt, send enable")
                self.child.sendline("enable")
            elif match == "prompt":
                logging.debug("successful login")
                self._login = True
                # force terminal length at login
                self.term_len = self._term_len
                return True
            elif match == "timeout":
                logging.debug("timeout received but connection still opened, send enter")
                self.child.sendline("\r\n")
            last_match = match
        # did not find prompt within max attempts, failed login
        logging.error("failed to login after multiple attempts")
        return False

    def cmd(self, command, **kargs):
        """
        execute a command on a device and wait for one of the provided matches to return.
        Required argument string command
        Optional arguments:
            timeout - seconds to wait for command to completed (default to self.timeout)
            sendline - boolean flag to use send or sendline fuction (default to true)
            matches - dictionary of key/regex to match against. Key corresponding to matched
                regex will be returned. By default, the following three keys/regex are applied:
                    'eof'     : pexpect.EOF
                    'timeout' : pexpect.TIMEOUT
                    'prompt'  : self.prompt
            echo_cmd - boolean flag to echo commands sent (default to false)
                note most terminals (i.e., Cisco devices) will echo back all typed characters
                by default. Therefore, enabling echo_cmd may cause duplicate cmd characters
        Return:
            returns the key from the matched regex. For most scenarios, this will be 'prompt'. The output
            from the command can be collected from self.output variable
        """
        sendline = True
        timeout = self.timeout
        matches = {}
        echo_cmd = False
        if "timeout" in kargs:
            timeout = kargs["timeout"]
        if "matches" in kargs:
            matches = kargs["matches"]
        if "sendline" in kargs:
            sendline = kargs["sendline"]
        if "echo_cmd" in kargs:
            echo_cmd = kargs["echo_cmd"]
        # ensure prompt is in the matches list
        if "prompt" not in matches:
            matches["prompt"] = self.prompt
        self.output = ""
        # check if we've ever logged into device or currently connected
        if (not self.__connected()) or (not self._login):
            logging.debug("no active connection, attempt to login")
            if not self.login():
                raise Exception("failed to login to host")
        # if echo_cmd is disabled, then need to disable logging before
        # executing commands
        if not echo_cmd: self.stop_log()
        # execute command
        logging.debug("cmd command: %s" % command)
        if sendline:
            self.child.sendline(command)
        else:
            self.child.send(command)
        # remember to re-enable logging
        if not echo_cmd: self.start_log()
        # force wait option
        if self.force_wait != 0:
            time.sleep(self.force_wait)
        result = self.__expect(matches, timeout)
        self.output = "%s%s" % (self.child.before, self.child.after)
        if result == "eof" or result == "timeout":
            logging.warning("unexpected %s occurred" % result)
        return result
class IPAddress:
    """Minimal IPv4 helpers; old APICs lack the stdlib `ipaddress` module."""

    @staticmethod
    def ip_to_binary(ip):
        """Return the 32-char binary string for a dotted-quad IPv4 address."""
        return "".join(format(int(octet), "08b") for octet in ip.split("."))

    @classmethod
    def get_network_binary(cls, ip, pfxlen):
        """Return the network portion (first `pfxlen` bits) of `ip` in binary."""
        return cls.ip_to_binary(ip)[:int(pfxlen)]

    @classmethod
    def ip_in_subnet(cls, ip, subnet):
        """Return True when `ip` belongs to `subnet` given as 'a.b.c.d/len'."""
        subnet_ip, subnet_pfxlen = subnet.split("/")
        network_of_subnet = cls.get_network_binary(subnet_ip, subnet_pfxlen)
        network_of_ip = cls.get_network_binary(ip, subnet_pfxlen)
        return network_of_ip == network_of_subnet
class AciVersion():
    """Parse and compare ACI version strings such as '5.2(1g)' or 'dk9.5.2.1g'."""
    v_regex = r'(?:dk9\.)?[1]?(?P<major1>\d)\.(?P<major2>\d)(?:\.|\()(?P<maint>\d+)\.?(?P<patch>(?:[a-b]|[0-9a-z]+))\)?'

    def __init__(self, version):
        """Parse `version`; raises RuntimeError when it cannot be parsed."""
        self.original = version
        v = re.search(self.v_regex, version)
        if not v:
            # Bug fix: the original passed logging-style lazy args to
            # RuntimeError, which never interpolates them (the message was a
            # tuple repr containing a literal `%s`). Format explicitly instead.
            raise RuntimeError("Parsing failure of ACI version `%s`" % version)
        self.version = '{major1}.{major2}({maint}{patch})'.format(**v.groupdict())
        self.dot_version = "{major1}.{major2}.{maint}{patch}".format(**v.groupdict())
        self.simple_version = "{major1}.{major2}({maint})".format(**v.groupdict())
        self.major1 = v.group('major1')
        self.major2 = v.group('major2')
        self.maint = v.group('maint')
        self.patch = v.group('patch')
        self.regex = v

    def __str__(self):
        return self.version

    def older_than(self, version):
        """True if self is strictly older than `version`; None if unparsable."""
        v = re.search(self.v_regex, version)
        if not v: return None
        for i in range(1, len(v.groups())+1):
            if i < 4:
                # major1/major2/maint compare numerically
                if int(self.regex.group(i)) > int(v.group(i)): return False
                elif int(self.regex.group(i)) < int(v.group(i)): return True
            if i == 4:
                # patch letters compare lexicographically (e.g. 'a' < 'b')
                if self.regex.group(i) > v.group(i): return False
                elif self.regex.group(i) < v.group(i): return True
        return False

    def newer_than(self, version):
        """True if self is strictly newer than `version`."""
        return not self.older_than(version) and not self.same_as(version)

    def same_as(self, version):
        """True if `version` normalizes to exactly the same release."""
        v = re.search(self.v_regex, version)
        ver = ('{major1}.{major2}({maint}{patch})'
               .format(**v.groupdict()) if v else None)
        return self.version == ver
def is_firstver_gt_secondver(first_ver, second_ver):
    """Used for CIMC version comparison.

    Compares fixed character positions of 'a.b(cd)'-style CIMC versions:
    index 0, 2 and 4 are the numeric fields, index 5 the patch letter.
    Returns True when first_ver >= second_ver at the first differing field.
    """
    for pos in (0, 2, 4):
        if first_ver[pos] > second_ver[pos]:
            return True
        if first_ver[pos] < second_ver[pos]:
            return False
    # all numeric fields equal; decide on the patch character
    return first_ver[5] >= second_ver[5]
def format_table(headers, data,
                 min_width=5, left_padding=2, hdr_sp='-', col_sp=' '):
    """ get string results in table format
    Args:
        header (list): list of column headers (optional)
            each header can either be a string representing the name or a
            dictionary with following attributes:
            {
                name (str): column name
                width (int or str): integer width of column. can also be a string 'auto'
                                    which is based on the longest string in column
                max_width (int): integer value of max width when combined with
            }
        data (list): list of rows, where each row is a list of values
                     corresponding to the appropriate header. If length of row
                     exceeds length of headers, it is is ignored.
        min_width (int, optional): minimum width enforced on any auto-calculated column. Defaults to 5.
        left_padding (int, optional): number of spaces to 'pad' left most column. Defaults to 2.
        hdr_sp (str, optional): print a separator string between hdr and data row. Defaults to '-'.
        col_sp (str, optional): print a separator string between data columns. Defaults to ' '.
    Returns:
        str: table with columns aligned with spacing
    """
    # non-list or empty data renders nothing
    if type(data) is not list or len(data) == 0:
        return ""
    cl = 800  # hard cap on the total rendered table width (characters)
    col_widths = []
    rows = []

    def update_col_widths(idx, new_width):
        # grow the tracked width for column `idx` to the widest cell seen so far
        if len(col_widths) < idx + 1:
            col_widths.append(new_width)
        elif col_widths[idx] < new_width:
            col_widths[idx] = new_width

    # first pass: stringify every cell and record per-column max widths
    for row in data:
        if type(row) is not list:
            return ""
        for idx, col in enumerate(row):
            update_col_widths(idx, len(str(col)))
        rows.append([str(col) for col in row])
    # normalize headers into dicts of {'name', 'width'}
    h_cols = []
    for idx, col in enumerate(headers):
        if isinstance(col, str):
            update_col_widths(idx, len(col))
            h_cols.append({'name': col, 'width': 'auto'})
        elif isinstance(col, dict):
            name = col.get('name', '')
            width = col.get('width', '')
            max_w = col.get('max_width', 0)
            update_col_widths(idx, len(name))
            if width == 'auto' and max_w:
                try:
                    # cap an auto-sized column at its declared max_width
                    if int(max_w) < col_widths[idx]:
                        col_widths[idx] = int(max_w)
                except ValueError:
                    max_w = 0
            else:
                try:
                    # explicit numeric width overrides the auto-calculated one
                    col_widths[idx] = int(width)
                except ValueError:
                    width = 'auto'
            h_cols.append({'name': name, 'width': width})
    # Adjust column width to fit the table with
    # shrink over-wide 'auto' columns until the table fits within `cl`
    recovery_width = 3 * min_width
    total_width = sum(col_widths) + len(col_sp) * len(col_widths) + left_padding
    for idx, h in enumerate(h_cols):
        if total_width <= cl: break
        if h['width'] == 'auto' and col_widths[idx] > recovery_width:
            total_width -= col_widths[idx] - recovery_width
            col_widths[idx] = recovery_width
    pad = ' ' * left_padding
    output = []
    if headers:
        output.append(
            get_row(col_widths, [c['name'] for c in h_cols], col_sp, pad)
        )
        if isinstance(hdr_sp, str):
            if len(hdr_sp) > 0:
                hsp_sp = hdr_sp[0]  # only single char for hdr_sp
                values = [hsp_sp * len(c['name']) for c in h_cols]
                output.append(
                    get_row(col_widths, values, col_sp, pad)
                )
    for row in rows:
        output.append(get_row(col_widths, row, col_sp, pad))
    return '\n'.join(output)
def get_row(widths, values, spad=" ", lpad=""):
    """Render one logical table row, wrapping each cell to its column width.

    widths: per-column widths (values < 1 are treated as width 1).
    values: per-column cell text; embedded newlines force line breaks.
    spad:   separator between columns on the first physical line; later
            wrapped lines use same-length blanks so columns stay aligned.
    lpad:   prefix prepended to every physical line.
    """
    wrapped_cols = []
    max_lines = 0
    for idx, text in enumerate(values):
        width = widths[idx] if widths[idx] > 0 else 1
        wrapper = TextWrapper(width=width)
        cell_lines = []
        for segment in text.split('\n'):
            cell_lines += wrapper.wrap(segment)
        wrapped_cols.append((width, cell_lines))
        max_lines = max(max_lines, len(cell_lines))
    blank_sep = ' ' * len(spad)  # space separators except for the 1st line
    rendered = []
    for line_no in range(max_lines):
        cells = [
            '{:{}}'.format(lines[line_no] if line_no < len(lines) else '', width)
            for width, lines in wrapped_cols
        ]
        sep = spad if not rendered else blank_sep
        rendered.append('%s%s' % (lpad, sep.join(cells).rstrip()))
    return '\n'.join(rendered).rstrip()
def prints(objects, sep=' ', end='\n'):
    """Print to stdout and mirror the same text into RESULT_FILE (appended)."""
    with open(RESULT_FILE, 'a') as result_file:
        for stream in (sys.stdout, result_file):
            print(objects, sep=sep, end=end, file=stream)
            stream.flush()
def print_title(title, index=None, total=None):
    """Print a check header without newline; numbered when index/total given."""
    if index and total:
        line = '[Check{:3}/{}] {}... '.format(index, total, title)
    else:
        line = '{:14}{}... '.format('', title)
    prints(line, end='')
def print_result(title, result, msg='',
                 headers=None, data=None,
                 unformatted_headers=None, unformatted_data=None,
                 recommended_action='',
                 doc_url='',
                 adjust_title=False):
    """Print a check's outcome line, optional detail tables and guidance.

    The result label is right-aligned to a 120-char line; `adjust_title`
    widens the padding for titles printed without the check-number prefix.
    NOTE: sorts `data`/`unformatted_data` in place before rendering.
    """
    padding = 120 - len(title) - len(msg)
    if adjust_title:
        padding += len(title) + 18
    chunks = ['{}{:>{}}'.format(msg, result, padding)]
    if data:
        data.sort()
        chunks.append('\n' + format_table(headers, data))
    if unformatted_data:
        unformatted_data.sort()
        chunks.append('\n' + format_table(unformatted_headers, unformatted_data))
    if data or unformatted_data:
        chunks.append('\n')
    if recommended_action:
        chunks.append('\n Recommended Action: %s' % recommended_action)
    if doc_url:
        chunks.append('\n Reference Document: %s' % doc_url)
    chunks.append('\n' * 2)
    prints(''.join(chunks))
def icurl(apitype, query):
    """Query the local APIC REST API via the `icurl` CLI.

    apitype: 'class' or 'mo'; query: URI remainder including the .json suffix.
    Returns the 'imdata' list; raises Exception when the API reports an error.
    """
    if apitype not in ('class', 'mo'):
        print('invalid API type - %s' % apitype)
        return []
    uri = 'http://127.0.0.1:7777/api/%s/%s' % (apitype, query)
    cmd = ['icurl', '-gs', uri]
    logging.info('cmd = ' + ' '.join(cmd))
    response = subprocess.check_output(cmd)
    logging.debug('response: ' + str(response))
    imdata = json.loads(response)['imdata']
    if imdata and "error" in imdata[0].keys():
        raise Exception('API call failed! Check debug log')
    return imdata
def get_credentials():
    """Prompt until a non-empty APIC username and password are provided."""
    usr = ''
    while not usr:
        usr = input('Enter username for APIC login          : ')
    pwd = ''
    while not pwd:
        pwd = getpass('Enter password for corresponding User  : ')
    print('')
    return usr, pwd
def get_current_version():
    """Return the running APIC version (node-1) as an AciVersion instance.

    Raises RuntimeError when no node-1 firmwareCtrlrRunning record exists
    (previously this path crashed with an unhelpful NameError because
    `apic1_version` was never assigned).
    """
    prints("Checking current APIC version (switch nodes are assumed to be on the same version)...", end='')
    firmwares = icurl('class', 'firmwareCtrlrRunning.json')
    apic1_version = None
    for firmware in firmwares:
        if 'node-1' in firmware['firmwareCtrlrRunning']['attributes']['dn']:
            apic1_version = firmware['firmwareCtrlrRunning']['attributes']['version']
            break
    if apic1_version is None:
        raise RuntimeError('Could not determine the current APIC version (node-1 not found in firmwareCtrlrRunning)')
    current_version = AciVersion(apic1_version)
    prints('%s\n' % current_version)
    return current_version
def get_target_version():
    """Prompt the user to choose a target APIC image from the firmware repository.

    Returns: AciVersion instance, or None when the repository holds no APIC images.
    """
    prints("Gathering APIC Versions from Firmware Repository...\n")
    repo_list = []
    # only controller (APIC) images whose isoname contains 'aci-apic'
    response_json = icurl('class',
                          'firmwareFirmware.json?query-target-filter=and(wcard(firmwareFirmware.isoname,"aci-apic"),eq(firmwareFirmware.type,"controller"))')
    if response_json:
        for version in response_json:
            repo_list.append(version['firmwareFirmware']['attributes']['isoname'])
        repo_list.sort()
        # Display version info to User
        for i, value in enumerate(repo_list):
            prints("[%s]: %s" % (i + 1, value))
        prints('')
        version_choice = None
        while version_choice is None:
            version_choice = input("What is the Target Version?     : ")
            try:
                version_choice = int(version_choice)
                # out-of-range numbers re-use the ValueError path to re-prompt
                if version_choice < 1 or version_choice > len(repo_list): raise ValueError("")
            except ValueError:
                prints("Please select a value between 1 and %s" % len(repo_list))
                version_choice = None
        version = repo_list[version_choice - 1]
        target_version = AciVersion(version)
        prints('\nYou have chosen version "%s"\n' % target_version)
        return target_version
    else:
        prints("No Firmware Detected!  Please Upload APIC Firmware and re-run the script.\n")
        return None
def get_vpc_nodes(**kwargs):
    """Return the node IDs of all VPC member switches, e.g. ['101', '102'].

    A pre-fetched fabricNodePEp list may be supplied via the
    'fabricNodePEp.json' keyword; otherwise the APIC is queried.
    """
    prints("Collecting VPC Node IDs...\n")
    prot_pols = kwargs.get("fabricNodePEp.json", None)
    if not prot_pols:
        prot_pols = icurl('class', 'fabricNodePEp.json')
    if not prot_pols:
        return []
    return [pep['fabricNodePEp']['attributes']['id'] for pep in prot_pols]
def apic_cluster_health_check(index, total_checks, cversion, **kwargs):
    """Check that every APIC sees every other APIC as fully-fit (infraWiNode)."""
    title = 'APIC Cluster is Fully-Fit'
    result = FAIL_UF
    msg = ''
    headers = ['APIC-ID\n(Seen By)', 'APIC-ID\n(Affected)', 'Admin State', 'Operational State', 'Health State']
    unformatted_headers = ['Affected DN', 'Admin State', 'Operational State', 'Health State']
    data = []
    unformatted_data = []
    doc_url = 'ACI Troubleshooting Guide 2nd Edition - http://cs.co/9003ybZ1d'
    print_title(title, index, total_checks)
    recommended_action = (
        'Follow "Initial Fabric Setup" in ACI Troubleshooting Guide 2nd Edition'
        if cversion.older_than("4.2")
        else 'Troubleshoot by running "acidiag cluster" on APIC CLI'
    )
    dn_regex = node_regex + r'/av/node-(?P<winode>\d)'
    infraWiNodes = icurl('class', 'infraWiNode.json')
    for av in infraWiNodes:
        attrs = av['infraWiNode']['attributes']
        if attrs['health'] == 'fully-fit':
            continue
        match = re.search(dn_regex, attrs['dn'])
        if match:
            data.append([match.group('node'), match.group('winode'),
                         attrs['adminSt'], attrs['operSt'], attrs['health']])
        else:
            # dn did not parse; report it raw so nothing is silently dropped
            unformatted_data.append([attrs['dn'], attrs['adminSt'],
                                     attrs['operSt'], attrs['health']])
    if not infraWiNodes:
        result = ERROR
        msg = 'infraWiNode (Appliance Vector) not found!'
    elif not data and not unformatted_data:
        result = PASS
    print_result(title, result, msg, headers, data, unformatted_headers, unformatted_data,
                 recommended_action, doc_url)
    return result
def switch_status_check(index, total_checks, **kwargs):
    """Check that every switch node is in fabricSt 'active' before upgrading."""
    title = 'Switches are all in Active state'
    result = FAIL_UF
    msg = ''
    headers = ['Pod-ID', 'Node-ID', 'State', 'Recommended Action']
    data = []
    recommended_action = 'Bring this node back to "active"'
    print_title(title, index, total_checks)
    # fabricNode.fabricSt shows `disabled` for both Decommissioned and Maintenance (GIR).
    # fabricRsDecommissionNode.debug==yes is required to show `disabled (Maintenance)`.
    fabricNodes = icurl('class', 'fabricNode.json?&query-target-filter=ne(fabricNode.role,"controller")')
    girNodes = icurl('class',
                     'fabricRsDecommissionNode.json?&query-target-filter=eq(fabricRsDecommissionNode.debug,"yes")')
    for node in fabricNodes:
        attrs = node['fabricNode']['attributes']
        state = attrs['fabricSt']
        if state == 'active':
            continue
        match = re.search(node_regex, attrs['dn'])
        pod_id = match.group("pod")
        node_id = match.group("node")
        for gir in girNodes:
            if node_id == gir['fabricRsDecommissionNode']['attributes']['targetId']:
                state = state + ' (Maintenance)'
        data.append([pod_id, node_id, state, recommended_action])
    if not fabricNodes:
        result = MANUAL
        msg = 'Switch fabricNode not found!'
    elif not data:
        result = PASS
    print_result(title, result, msg, headers, data)
    return result
def maintp_grp_crossing_4_0_check(index, total_checks, cversion, tversion, **kwargs):
    """Maintenance/firmware groups must be removed when upgrading across 4.0."""
    title = 'Firmware/Maintenance Groups when crossing 4.0 Release'
    result = PASS
    msg = ''
    headers = ["Group Name", "Group Type", "Recommended Action"]
    data = []
    recommended_action = 'Remove the group prior to APIC upgrade. Create a new switch group once APICs are upgraded to post-4.0.'
    print_title(title, index, total_checks)
    if int(cversion.major1) >= 4 or (tversion and int(tversion.major1) <= 3):
        # either already past 4.0 or not crossing it
        result = NA
        msg = 'Versions not applicable'
    elif not tversion:
        # current version is pre-4.0 here; cannot tell without a target
        result = MANUAL
        msg = 'Target version not supplied. Skipping.'
    else:
        groups = icurl('mo', '/uni/fabric.json?query-target=children&target-subtree-class=maintMaintP,firmwareFwP')
        for group in groups:
            result = FAIL_O
            if group.get('maintMaintP'):
                data.append([group['maintMaintP']['attributes']['name'], 'Maintenance Group', recommended_action])
            else:
                data.append([group['firmwareFwP']['attributes']['name'], 'Firmware Group', recommended_action])
    print_result(title, result, msg, headers, data)
    return result
def ntp_status_check(index, total_checks, **kargs):
    """Check that every APIC and switch node is synchronized to an NTP server."""
    title = 'NTP Status'
    result = FAIL_UF
    msg = ''
    headers = ["Pod-ID", "Node-ID", "Recommended Action"]
    data = []
    recommended_action = 'Not Synchronized. Check NTP config and NTP server reachability.'
    print_title(title, index, total_checks)
    fabricNodes = icurl('class', 'fabricNode.json')
    # start with every node unsynced, then strike off nodes proven synced
    unsynced = [fn['fabricNode']['attributes']['id'] for fn in fabricNodes]
    apicNTPs = icurl('class', 'datetimeNtpq.json')
    switchNTPs = icurl('class', 'datetimeClkPol.json')
    synced_dns = []
    for apicNTP in apicNTPs:
        attrs = apicNTP['datetimeNtpq']['attributes']
        if attrs['tally'] == '*':  # '*' marks the peer the APIC syncs to
            synced_dns.append(attrs['dn'])
    for switchNTP in switchNTPs:
        attrs = switchNTP['datetimeClkPol']['attributes']
        if 'synced' in attrs['srvStatus']:
            synced_dns.append(attrs['dn'])
    for dn_str in synced_dns:
        dn = re.search(node_regex, dn_str)
        if dn and dn.group('node') in unsynced:
            unsynced.remove(dn.group('node'))
    for fn in fabricNodes:
        if fn['fabricNode']['attributes']['id'] in unsynced:
            dn = re.search(node_regex, fn['fabricNode']['attributes']['dn'])
            data.append([dn.group('pod'), dn.group('node'), recommended_action])
    if not data:
        result = PASS
    print_result(title, result, msg, headers, data)
    return result
def features_to_disable_check(index, total_checks, cversion, tversion, **kwargs):
    """Flag features that must be disabled before an upgrade.

    Checks three things: non-default App Center apps, locked config zones,
    and the Rogue Endpoint feature when either side of the upgrade is 4.1.
    """
    title = 'Features that need to be Disabled prior to Upgrade'
    result = FAIL_O
    msg = ''
    headers = ["Feature", "Name", "Status", "Recommended Action"]
    data = []
    print_title(title, index, total_checks)

    apPlugins = icurl('class', 'apPlugin.json?&query-target-filter=ne(apPlugin.pluginSt,"inactive")')
    infraMOs = icurl('mo', 'uni/infra.json?query-target=subtree&target-subtree-class=infrazoneZone,epControlP')

    # Cisco-bundled apps are expected to be present and are not flagged.
    default_appDNs = [
        'pluginContr/plugin-Cisco_IntersightDC',
        'pluginContr/plugin-Cisco_NIALite',
        'pluginContr/plugin-Cisco_NIBASE',
        'pluginContr/plugin-Cisco_ApicVision',
    ]
    if apPlugins:
        for plugin in apPlugins:
            attrs = plugin['apPlugin']['attributes']
            if attrs['dn'] not in default_appDNs:
                data.append(['App Center', attrs['name'], attrs['pluginSt'], 'Disable the app'])

    for mo in infraMOs:
        zone = mo.get('infrazoneZone')
        ep_ctrl = mo.get('epControlP')
        if zone and zone['attributes']['deplMode'] == 'disabled':
            # deplMode 'disabled' means the zone is locked.
            data.append(['Config Zone', zone['attributes']['name'], 'Locked',
                         'Change the status to "Open" or remove the zone'])
        elif ep_ctrl and ep_ctrl['attributes']['adminSt'] == 'enabled':
            # Rogue EP must be off during an upgrade to or from 4.1 (not 4.1-to-4.1).
            if not tversion:
                ra = 'Disable Rogue EP during the upgrade if your current version is 4.1 or your target version is 4.1'
            else:
                cv_is_4_1 = cversion.major1 == '4' and cversion.major2 == '1'
                tv_is_4_1 = tversion.major1 == '4' and tversion.major2 == '1'
                if cv_is_4_1 and not tv_is_4_1:
                    ra = 'Disable Rogue EP during the upgrade because your current version is 4.1'
                elif not cv_is_4_1 and tv_is_4_1:
                    ra = 'Disable Rogue EP during the upgrade because your target version is 4.1'
                else:
                    ra = ''
            if ra:
                data.append(['Rogue Endpoint', ep_ctrl['attributes']['name'], 'Enabled', ra])

    if not data:
        result = PASS
    print_result(title, result, msg, headers, data)
    return result
def switch_group_guideline_check(index, total_checks, **kwargs):
    """Check switch upgrade groups against Cisco's switch upgrade guidelines.

    A group is flagged when upgrading it at once would take down:
      - all spines in a pod (any spine, BGP route reflectors, or spines
        attached to the infra L3Out, i.e. IPN/ISN-facing),
      - all leaf switches LLDP-adjacent to a single APIC, or
      - both members of a vPC pair.
    """
    title = 'Switch Upgrade Group Guidelines'
    result = FAIL_O
    msg = ''
    headers = ['Group Name', 'Pod-ID', 'Node-IDs', 'Failure Reason']
    data = []
    recommended_action = 'Upgrade nodes in each line above separately in another group.'
    doc_url = 'Guidelines for Switch Upgrades in ACI Firmware Upgrade Overview'
    print_title(title, index, total_checks)
    maints = icurl('class', 'maintMaintGrp.json?rsp-subtree=children')
    if not maints:
        result = MANUAL
        msg = 'No upgrade groups found!'
        print_result(title, result, msg, headers, data,
                     recommended_action=recommended_action, doc_url=doc_url)
        return result
    # f_spines[i][pod] = node IDs of category-i spines in that pod, where the
    # categories are: all spines, BGP RR spines, IPN/ISN-connected spines.
    spine_type = ['', 'RR ', 'IPN/ISN ']
    f_spines = [defaultdict(list) for t in spine_type]
    reason = 'All {}spine nodes in this pod are in the same group.'
    reasons = [reason.format(t) for t in spine_type]
    reason_apicleaf = 'All leaf nodes connected to APIC {} are in the same group.'
    reason_vpc = 'Both leaf nodes in the same vPC pair are in the same group.'
    nodes = {}
    fabricNodes = icurl('class', 'fabricNode.json')
    for fn in fabricNodes:
        attr = fn['fabricNode']['attributes']
        nodes[attr['dn']] = {'role': attr['role'], 'nodeType': attr['nodeType']}
    # Category 0: every spine, keyed by pod.
    for key in nodes:
        if nodes[key]['role'] == 'spine':
            dn = re.search(node_regex, key)
            if not dn:
                logging.error('Failed to parse - %s', key)
                continue
            f_spines[0][dn.group('pod')].append(int(dn.group('node')))
    # Category 1: BGP route reflector spines.
    bgpRRs = icurl('class', 'bgpRRNodePEp.json')
    for bgpRR in bgpRRs:
        pod = bgpRR['bgpRRNodePEp']['attributes']['podId']
        node = bgpRR['bgpRRNodePEp']['attributes']['id']
        f_spines[1][pod].append(int(node))
    # Category 2: spines bound to the infra tenant L3Out (IPN/ISN facing).
    infraL3Outs = icurl('class',
                        'l3extRsNodeL3OutAtt.json?query-target-filter=wcard(l3extRsNodeL3OutAtt.dn,"tn-infra/")')
    for infraL3Out in infraL3Outs:
        tDn = infraL3Out['l3extRsNodeL3OutAtt']['attributes']['tDn']
        if nodes.get(tDn, {}).get('role') == 'spine':
            dn = re.search(node_regex, tDn)
            if not dn:
                logging.error('Failed to parse - %s', tDn)
                continue
            f_spines[2][dn.group('pod')].append(int(dn.group('node')))
    # apic_leafs['<apic_id>-<pod>'] = leaf node IDs LLDP-adjacent to that APIC.
    apic_leafs = defaultdict(set)
    lldps = icurl('class', 'lldpCtrlrAdjEp.json')
    for lldp in lldps:
        dn = re.search(node_regex, lldp['lldpCtrlrAdjEp']['attributes']['dn'])
        if not dn:
            logging.error('Failed to parse - %s', lldp['lldpCtrlrAdjEp']['attributes']['dn'])
            continue
        apic_id_pod = '-'.join([lldp['lldpCtrlrAdjEp']['attributes']['id'], dn.group('pod')])
        apic_leafs[apic_id_pod].add(int(dn.group('node')))
    vpcs = icurl('class', 'fabricExplicitGEp.json?rsp-subtree=children&rsp-subtree-class=fabricNodePEp')
    for maint in maints:
        m_nodes = []
        m_name = ''
        for mc in maint['maintMaintGrp']['children']:
            if mc.get('maintRsMgrpp'):
                m_name = mc['maintRsMgrpp']['attributes']['tnMaintMaintPName']
            elif mc.get('fabricNodeBlk'):
                m_nodes += range(int(mc['fabricNodeBlk']['attributes']['from_']),
                                 int(mc['fabricNodeBlk']['attributes']['to_']) + 1)
        # m_spines[i][pod] = this group's members that are category-i spines.
        m_spines = [defaultdict(list) for t in spine_type]
        for m_node in m_nodes:
            for idx, fabric in enumerate(f_spines):
                for pod in fabric:
                    if m_node in fabric[pod]:
                        m_spines[idx][pod].append(m_node)
                        break
        # BUGFIX: the original loop used `for m, f, r in zip(...)`, shadowing
        # the outer maintenance-group loop variable `m`. Renamed for safety.
        for grp_pods, all_pods, fail_reason in zip(m_spines, f_spines, reasons):
            for pod in grp_pods:
                # The group contains every category spine of this pod: flag it.
                if len(grp_pods[pod]) == len(all_pods[pod]):
                    data.append([m_name, pod, ','.join(str(x) for x in grp_pods[pod]), fail_reason])
        for apic_id_pod in apic_leafs:
            # All leaves adjacent to this APIC are members of the group.
            if apic_leafs[apic_id_pod] == apic_leafs[apic_id_pod].intersection(m_nodes):
                pod = apic_id_pod.split('-')[1]
                apic_id = apic_id_pod.split('-')[0]
                data.append([m_name, pod, ','.join(str(x) for x in apic_leafs[apic_id_pod]),
                             reason_apicleaf.format(apic_id)])
        for vpc in vpcs:
            m_vpc_peers = []
            for vpc_peer in vpc['fabricExplicitGEp']['children']:
                if int(vpc_peer['fabricNodePEp']['attributes']['id']) in m_nodes:
                    m_vpc_peers.append({
                        'node': vpc_peer['fabricNodePEp']['attributes']['id'],
                        'pod': vpc_peer['fabricNodePEp']['attributes']['podId']
                    })
            # More than one peer of the same vPC protection group in this group.
            if len(m_vpc_peers) > 1:
                data.append([m_name, m_vpc_peers[0]['pod'],
                             ','.join(x['node'] for x in m_vpc_peers),
                             reason_vpc])
    if not data and not msg:
        result = PASS
    print_result(title, result, msg, headers, data,
                 recommended_action=recommended_action, doc_url=doc_url)
    return result
def switch_bootflash_usage_check(index, total_checks, **kwargs):
title = 'Switch Node /bootflash usage'
result = FAIL_UF
msg = ''
headers = ["Pod-ID", "Node-ID", "Utilization", "Alert"]
data = []
print_title(title, index, total_checks)
response_json = kwargs.get("eqptcapacityFSPartition.json")
if not response_json:
response_json = icurl('class',
'eqptcapacityFSPartition.json?query-target-filter=eq(eqptcapacityFSPartition.path,"/bootflash")')
if not response_json: