Dev: migration: refine message wording (jsc#PED-11808)
nicholasyang2022 committed Jan 16, 2025
1 parent 252befd commit abe07c3
Showing 2 changed files with 11 additions and 18 deletions.
crmsh/migration.py: 8 additions & 15 deletions
@@ -116,12 +116,7 @@ def write_in_color(f, color: str, text: str):
             f.write(text)
 
     def end(self):
-        if self.has_problems:
-            self.write_in_color(sys.stdout, constants.RED, '[FAIL]')
-            sys.stdout.write(' Please fix all the "FAIL" problems above before migrating to SLES 16.\n\n')
-        else:
-            self.write_in_color(sys.stdout, constants.GREEN, '[PASS]')
-            sys.stdout.write(' Good to migrate to SLES 16.\n\n')
+        sys.stdout.write('\n')
 
 
 def check(args: typing.Sequence[str]) -> int:
@@ -140,8 +135,9 @@ def check(args: typing.Sequence[str]) -> int:
     ret = 0
     if not parsed_args.local and not parsed_args.json:
         remote_ret = check_remote()
-        print('------ localhost ------')
+        print('------ corosync @ localhost ------')
         check_local(handler)
+        print('------ cib ------')
         check_global(handler)
     else:
         remote_ret = 0
@@ -163,7 +159,7 @@ def check(args: typing.Sequence[str]) -> int:
             sys.stdout.write(' This cluster is good to migrate to SLES 16.\n')
         else:
             CheckResultInteractiveHandler.write_in_color(sys.stdout, constants.RED, '[FAIL]')
-            sys.stdout.write(' Please fix all the "FAIL" problems above before migrating to SLES 16.\n')
+            sys.stdout.write(' The pacemaker cluster stack can not migrate to SLES 16. Please fix all the "FAIL" problems above before migrating to SLES 16.\n')
     return ret
 
 
@@ -181,7 +177,7 @@ def check_remote():
         })
     ret = 0
     for host, result in result.items():
-        sys.stdout.write(f'------ {host} ------\n')
+        sys.stdout.write(f'------ corosync @ {host} ------\n')
         if isinstance(result, prun.SSHError):
             handler.write_in_color(
                 sys.stdout, constants.YELLOW,
@@ -287,8 +283,7 @@ def check_service_status(handler: CheckResultHandler):
 def check_unsupported_corosync_features(handler: CheckResultHandler):
     handler.log_info("Checking used corosync features...")
     transport = 'udpu' if corosync.is_unicast() else 'udp'
-    handler.handle_tip(f'Corosync transport "{transport}" will be deprecated in corosync 3.', [
-        'After migrating to SLES 16, run "crm cluster health sles16 --fix" to migrate it to transport "knet".',
+    handler.handle_tip(f'Corosync transport "{transport}" will be deprecated in corosync 3. Please use "knet".', [
     ])
     if corosync.get_value("totem.rrp_mode") in {'active', 'passive'}:
         handler.handle_tip(f'Corosync RRP will be deprecated in corosync 3.', [
@@ -306,8 +301,7 @@ def check_unsupported_resource_agents(handler: CheckResultHandler):
             ocf_resource_agents.append(resource_agent)
         elif resource_agent.m_class == 'stonith':
             if resource_agent.m_type == 'external/sbd':
-                handler.handle_tip('stonith:external/sbd will be removed.', [
-                    '* After migrating to SLES 16, please replace it with stonith:fence_sbd.'
+                handler.handle_tip('stonith:external/sbd will be removed. Please use stonith:fence_sbd', [
                 ])
             else:
                 stonith_resource_agents.append(resource_agent)
@@ -381,6 +375,5 @@ def _check_removed_resource_agents(
 
 def _check_ocfs2(handler: CheckResultHandler, cib: lxml.etree.Element):
     if cibquery.has_primitive_filesystem_with_fstype(cib, 'ocfs2'):
-        handler.handle_problem(False, 'OCFS2 is not supported in SLES 16.', [
-            '* Before migrating to SLES 16, replace it with GFS2.',
+        handler.handle_problem(False, 'OCFS2 is not supported in SLES 16. Please use GFS2.', [
         ])
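
To illustrate the refined wording, here is a minimal standalone sketch (not crmsh itself; the handle_tip stand-in, the ANSI codes, and the output layout are assumptions modeled on the diff context) of how a tip renders once the remedy moves into the message itself and the detail list is left empty:

import sys

# Assumed ANSI codes; crmsh keeps its own in crmsh.constants.
YELLOW = '\033[33m'
END = '\033[0m'

def write_in_color(f, color: str, text: str):
    # Colorize only when writing to a terminal, mirroring the
    # write_in_color() shown in the diff context above.
    if f.isatty():
        f.write(color + text + END)
    else:
        f.write(text)

def handle_tip(title: str, details: list[str]):
    # Hypothetical stand-in for CheckResultHandler.handle_tip():
    # a [WARN] tag, the one-line message, then any detail lines.
    write_in_color(sys.stdout, YELLOW, '[WARN]')
    sys.stdout.write(f' {title}\n')
    for line in details:
        sys.stdout.write(f'  {line}\n')

# After this commit the remedy lives in the message and the detail
# list is empty:
handle_tip('Corosync transport "udpu" will be deprecated in corosync 3. Please use "knet".', [])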
test/features/migration.feature: 3 additions & 3 deletions
@@ -10,7 +10,7 @@ Feature: migration
     And Run "crm cluster init -y -N hanode2" OK on "hanode1"
     When Try "crm cluster health sles16" on "hanode1"
     Then Expected return code is "1"
-    And Expect stdout contains snippets ["[WARN] Corosync transport \"udpu\" will be deprecated in corosync 3.", "[FAIL] Please fix all the \"FAIL\" problems above before migrating to SLES 16.", "----- localhost -----", "----- hanode2 -----"].
+    And Expect stdout contains snippets ["[WARN] Corosync transport \"udpu\" will be deprecated in corosync 3.", "[FAIL] The pacemaker cluster stack can not migrate to SLES 16. Please fix all the \"FAIL\" problems above before migrating to SLES 16.", "----- localhost -----", "----- hanode2 -----"].
 
   Scenario: Should not run fixes.
     When Try "crm cluster health sles16 --fix" on "hanode1"
@@ -21,10 +21,10 @@ Feature: migration
     When Run "crm cluster stop --all" on "hanode1"
     And Try "crm cluster health sles16" on "hanode1"
     Then Expected return code is "1"
-    And Expect stdout contains snippets ["Check results may be outdated or inaccurate.", "[WARN] Corosync transport \"udpu\" will be deprecated in corosync 3.", "[FAIL] Please fix all the \"FAIL\" problems above before migrating to SLES 16.", "----- localhost -----", "----- hanode2 -----"].
+    And Expect stdout contains snippets ["Check results may be outdated or inaccurate.", "[WARN] Corosync transport \"udpu\" will be deprecated in corosync 3.", "[FAIL] The pacemaker cluster stack can not migrate to SLES 16. Please fix all the \"FAIL\" problems above before migrating to SLES 16.", "----- localhost -----", "----- hanode2 -----"].
 
   Scenario: Run pre-migration checks when some of the nodes are offline.
     When Run "systemctl stop sshd" on "hanode2"
     And Try "crm cluster health sles16" on "hanode1"
     Then Expected return code is "1"
-    And Expect stdout contains snippets ["Cannot create SSH connection to", "[FAIL] Please fix all the \"FAIL\" problems above before migrating to SLES 16.", "----- localhost -----", "----- hanode2 -----"].
+    And Expect stdout contains snippets ["Cannot create SSH connection to", "[FAIL] The pacemaker cluster stack can not migrate to SLES 16. Please fix all the \"FAIL\" problems above before migrating to SLES 16.", "----- localhost -----", "----- hanode2 -----"].
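
The "Expect stdout contains snippets" steps above appear to assert plain substring containment; a small sketch under that assumption (the real step definition lives in crmsh's behave test harness and may differ):

def stdout_contains_snippets(stdout: str, snippets: list[str]) -> bool:
    # Every expected snippet must appear somewhere in the captured output.
    return all(snippet in stdout for snippet in snippets)

# Example with the refined [FAIL] summary from this commit:
captured = (
    '------ corosync @ localhost ------\n'
    '[FAIL] The pacemaker cluster stack can not migrate to SLES 16. '
    'Please fix all the "FAIL" problems above before migrating to SLES 16.\n'
)
assert stdout_contains_snippets(captured, [
    '[FAIL] The pacemaker cluster stack can not migrate to SLES 16.',
])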
