Compare commits

2 Commits

7c8c71dff7 Improve handling of local connections in CLI
1. Ensure the "local" connection is always present when the local cluster
configuration exists, and that it is stored in the store file.

2. Remove any invalid "local" store entries if present (i.e.
pvcapid.yaml entries from legacy versions).

3. Order the connection lists such that "local" is always first.

4. Improve the pretty list output format so that all fields are padded wider
when needed.
2024-10-17 09:56:54 -04:00
861fef91e3 Add modification of Monitor hosts on XML import
Without this, VMs imported into clusters with different storage hosts would
silently fail to start. Ensure the Monitor hosts are updated on import just as
the secret UUID is.
2024-10-16 16:00:54 -04:00
5 changed files with 35 additions and 16 deletions

View File

@@ -1438,15 +1438,7 @@ def vm_snapshot_receive_block_createsnap(zkhandler, pool, volume, snapshot):
@ZKConnection(config)
def vm_snapshot_receive_config(zkhandler, snapshot, vm_config, source_snapshot=None):
"""
Receive a VM configuration from a remote system
This function requires some explanation.
We get a full JSON dump of the VM configuration as provided by `pvc vm info`. This contains all the information we
reasonably need to replicate the VM at the given snapshot, including metainformation.
First, we need to determine if this is an incremental or full send. If it's full, and the VM already exists,
this is an issue and we have to error. But this should have already happened with the RBD volumes.
Receive a VM configuration snapshot from a remote system, and modify it to work on our system
"""
def parse_unified_diff(diff_text, original_text):
@@ -1503,13 +1495,28 @@ def vm_snapshot_receive_config(zkhandler, snapshot, vm_config, source_snapshot=N
vm_xml = vm_config["xml"]
vm_xml_diff = "\n".join(current_snapshot["xml_diff_lines"])
snapshot_vm_xml = parse_unified_diff(vm_xml_diff, vm_xml)
xml_data = etree.fromstring(snapshot_vm_xml)
# Replace the Ceph storage secret UUID with this cluster's
our_ceph_secret_uuid = config["ceph_secret_uuid"]
xml_data = etree.fromstring(snapshot_vm_xml)
ceph_secrets = xml_data.xpath("//secret[@type='ceph']")
for ceph_secret in ceph_secrets:
ceph_secret.set("uuid", our_ceph_secret_uuid)
# Replace the Ceph source hosts with this cluster's
our_ceph_storage_hosts = config["storage_hosts"]
our_ceph_storage_port = str(config["ceph_monitor_port"])
ceph_sources = xml_data.xpath("//source[@protocol='rbd']")
for ceph_source in ceph_sources:
for host in ceph_source.xpath("host"):
ceph_source.remove(host)
for ceph_storage_host in our_ceph_storage_hosts:
new_host = etree.Element("host")
new_host.set("name", ceph_storage_host)
new_host.set("port", our_ceph_storage_port)
ceph_source.append(new_host)
# Regenerate the VM XML
snapshot_vm_xml = etree.tostring(xml_data, pretty_print=True).decode("utf8")
if (
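
As a self-contained illustration of what this hunk does, here is a minimal sketch assuming lxml; the secret UUID, host names, port, and the sample disk XML below are made up for the example, not taken from the commit:

from lxml import etree

# Hypothetical stand-ins for config["ceph_secret_uuid"], config["storage_hosts"]
# and config["ceph_monitor_port"] on the receiving cluster
our_ceph_secret_uuid = "11111111-2222-3333-4444-555555555555"
our_ceph_storage_hosts = ["hv1.storage.local", "hv2.storage.local", "hv3.storage.local"]
our_ceph_storage_port = "3300"

# Made-up snapshot XML carrying the sending cluster's secret UUID and monitor hosts
snapshot_vm_xml = """
<domain type='kvm'>
  <devices>
    <disk type='network' device='disk'>
      <auth username='libvirt'>
        <secret type='ceph' uuid='99999999-8888-7777-6666-555555555555'/>
      </auth>
      <source protocol='rbd' name='vms/testvm_disk0'>
        <host name='old-hv1' port='6789'/>
        <host name='old-hv2' port='6789'/>
      </source>
    </disk>
  </devices>
</domain>
"""

xml_data = etree.fromstring(snapshot_vm_xml)

# Point every Ceph auth secret at this cluster's secret UUID
for ceph_secret in xml_data.xpath("//secret[@type='ceph']"):
    ceph_secret.set("uuid", our_ceph_secret_uuid)

# Replace the RBD monitor host list with this cluster's storage hosts
for ceph_source in xml_data.xpath("//source[@protocol='rbd']"):
    for host in ceph_source.xpath("host"):
        ceph_source.remove(host)
    for ceph_storage_host in our_ceph_storage_hosts:
        new_host = etree.Element("host")
        new_host.set("name", ceph_storage_host)
        new_host.set("port", our_ceph_storage_port)
        ceph_source.append(new_host)

print(etree.tostring(xml_data, pretty_print=True).decode("utf8"))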

View File

@@ -905,7 +905,7 @@ def cli_connection_list_format_pretty(CLI_CONFIG, data):
# Parse each connection and adjust field lengths
for connection in data:
for field, length in [(f, fields[f]["length"]) for f in fields]:
_length = len(str(connection[field]))
_length = len(str(connection[field])) + 1
if _length > length:
length = len(str(connection[field])) + 1
@@ -1005,7 +1005,7 @@ def cli_connection_detail_format_pretty(CLI_CONFIG, data):
# Parse each connection and adjust field lengths
for connection in data:
for field, length in [(f, fields[f]["length"]) for f in fields]:
_length = len(str(connection[field]))
_length = len(str(connection[field])) + 1
if _length > length:
length = len(str(connection[field])) + 1
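
The intent of the +1 change, sketched here outside the CLI with made-up field defaults and connection rows (not the actual formatter), is that the measured width already includes one character of padding before it is compared against the default length:

# Hypothetical stand-ins for the formatter's `fields` defaults and `data` rows
fields = {"name": {"length": 4}, "address": {"length": 7}}
data = [
    {"name": "local", "address": "127.0.0.1"},
    {"name": "prod", "address": "10.0.0.10"},
]

for connection in data:
    for field in fields:
        # Measure each value with its one-character padding included, so a value
        # that exactly fills the default width still widens the column
        _length = len(str(connection[field])) + 1
        if _length > fields[field]["length"]:
            fields[field]["length"] = _length

print(fields)  # {'name': {'length': 6}, 'address': {'length': 10}}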

View File

@@ -167,9 +167,17 @@ def get_store(store_path):
with open(store_file) as fh:
try:
store_data = jload(fh)
return store_data
except Exception:
return dict()
store_data = dict()
if path.exists(DEFAULT_STORE_DATA["cfgfile"]):
if store_data.get("local", None) != DEFAULT_STORE_DATA:
del store_data["local"]
if "local" not in store_data.keys():
store_data["local"] = DEFAULT_STORE_DATA
update_store(store_path, store_data)
return store_data
def update_store(store_path, store_data):
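
Put together, the intended store-loading behaviour can be sketched as a standalone function. This is a simplified sketch, not the exact client code: DEFAULT_STORE_DATA, the config file path, and the store file layout are hypothetical stand-ins, and the invalid-entry removal is written defensively with pop():

import json
from os import path

# Hypothetical stand-in for the CLI's default "local" connection entry
DEFAULT_STORE_DATA = {
    "cfgfile": "/etc/pvc/pvc.conf",
    "host": "localhost",
    "port": 7370,
    "scheme": "http",
    "api_key": None,
}

def get_store(store_file):
    # Load the store file, falling back to an empty store on any error
    try:
        with open(store_file) as fh:
            store_data = json.load(fh)
    except Exception:
        store_data = dict()

    # If this host has a local cluster configuration, drop any stale or legacy
    # "local" entry and (re)create it from the defaults, so "local" is always
    # present and correct whenever the local configuration exists
    if path.exists(DEFAULT_STORE_DATA["cfgfile"]):
        if store_data.get("local") != DEFAULT_STORE_DATA:
            store_data.pop("local", None)
        if "local" not in store_data:
            store_data["local"] = DEFAULT_STORE_DATA

    return store_data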

View File

@@ -68,7 +68,8 @@ def cli_connection_list_parser(connections_config, show_keys_flag):
}
)
return connections_data
# Return, ensuring local is always first
return sorted(connections_data, key=lambda x: (x.get("name") != "local"))
def cli_connection_detail_parser(connections_config):
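
The sort key works because False (the "local" entry) orders before True, and sorted() is stable, so the remaining connections keep their original order; a quick illustration with made-up entries:

# Made-up connection entries
connections_data = [
    {"name": "prod", "address": "10.0.0.10"},
    {"name": "local", "address": "127.0.0.1"},
    {"name": "backup", "address": "10.0.1.10"},
]

# "local" sorts first; all other entries keep their relative order
ordered = sorted(connections_data, key=lambda x: (x.get("name") != "local"))
print([c["name"] for c in ordered])  # ['local', 'prod', 'backup']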

View File

@@ -375,8 +375,11 @@ def get_parsed_configuration(config_file):
config = {**config, **config_api_ssl}
# Use coordinators as storage hosts if not explicitly specified
# These are added as FQDNs in the storage domain
if not config["storage_hosts"] or len(config["storage_hosts"]) < 1:
config["storage_hosts"] = config["coordinators"]
config["storage_hosts"] = []
for host in config["coordinators"]:
config["storage_hosts"].append(f"{host}.{config['storage_domain']}")
# Set up our token list if specified
if config["api_auth_source"] == "token":
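
A minimal sketch of the new fallback, with made-up coordinator names and storage domain (the real values come from the parsed cluster configuration):

# Hypothetical parsed configuration values
config = {
    "coordinators": ["hv1", "hv2", "hv3"],
    "storage_domain": "storage.example.com",
    "storage_hosts": [],
}

# Use the coordinators as storage hosts if none are explicitly specified,
# appending the storage domain so they are added as FQDNs
if not config["storage_hosts"] or len(config["storage_hosts"]) < 1:
    config["storage_hosts"] = []
    for host in config["coordinators"]:
        config["storage_hosts"].append(f"{host}.{config['storage_domain']}")

print(config["storage_hosts"])
# ['hv1.storage.example.com', 'hv2.storage.example.com', 'hv3.storage.example.com']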