parent 126f0742cd
commit 602dd7b714

@@ -1 +1 @@
{"version": "0", "root": "", "base": {"schema": "/schema", "schema.version": "/schema/version", "config": "/config", "config.maintenance": "/config/maintenance", "config.primary_node": "/config/primary_node", "config.upstream_ip": "/config/upstream_ip", "config.migration_target_selector": "/config/migration_target_selector", "lock": "/locks", "lock.primary_node": "/locks/primary_node", "lock.flush_lock": "/locks/flush_lock", "lock.domain_migrate": "/locks/domain_migrate", "cmd": "/cmd", "cmd.nodes": "/cmd/nodes", "cmd.domains": "/cmd/domains", "cmd.ceph": "/cmd/ceph", "node": "/nodes", "domain": "/domains", "network": "/networks", "storage": "/ceph", "storage.util": "/ceph/util", "osd": "/ceph/osds", "pool": "/ceph/pools", "volume": "/ceph/volumes", "snapshot": "/ceph/snapshots"}, "node": {"keepalive": "/keepalive", "mode": "/daemonmode", "staticdata": "/static_data", "data.kernel": "/oskernelversion", "data.os": "/ostype", "data.arch": "/osarch", "counts.provisioned_domains": "/domainscount", "counts.running_domains": "/runningdomains", "counts.networks": "/networkscount", "state.daemon": "/daemonstate", "state.router": "/routerstate", "state.domain": "/domainstate", "vcpu.total": "/vcputotal", "vcpu.allocated": "/vcpualloc", "memory.total": "/memtotal", "memory.used": "/memused", "memory.free": "/memfree", "memory.allocated": "/memalloc", "memory.provisioned": "/memprov", "ipmi.hostname": "/ipmihostname", "ipmi.username": "/ipmiusername", "ipmi.password": "/ipmipassword"}, "domain": {"name": "", "xml": "/xml", "state": "/state", "profile": "/profile", "stats": "/stats", "node": "/node", "last_node": "/lastnode", "failed_reason": "/failedreason", "console.log": "/consolelog", "console.vnc": "/vnc", "meta.autostart": "/node_autostart", "meta.migrate_method": "/migration_method", "meta.node_selector": "/node_selector", "meta.node_limit": "/node_limit"}, "network": {"type": "/nettype", "rules": "/firewall_rules", "nameservers": "/name_servers", "domain": "/domain", "ip4.gateway": "/ip4_gateway", "ip4.network": "/ip4_network", "ip4.dhcp": "/dhcp4_flag", "ip4.reservations": "/dhcp4_reservations", "ip4.dhcp_start": "/dhcp4_start", "ip4.dhcp_end": "/dhcp4_end", "ip6.gateway": "/ip6_gateway", "ip6.network": "/ip6_network", "ip6.dhcp": "/dhcp6_flag"}, "osd": {"node": "/node", "device": "/device", "stats": "/stats"}, "pool": {"pgs": "/pgs", "stats": "/stats"}, "volume": {"stats": "/stats"}, "snapshot": {"stats": "/stats"}}
{"version": "0", "root": "", "base": {"schema": "/schema", "schema.version": "/schema/version", "config": "/config", "config.maintenance": "/config/maintenance", "config.primary_node": "/config/primary_node", "config.upstream_ip": "/config/upstream_ip", "config.migration_target_selector": "/config/migration_target_selector", "lock": "/locks", "lock.primary_node": "/locks/primary_node", "lock.flush_lock": "/locks/flush_lock", "lock.domain_migrate": "/locks/domain_migrate", "cmd": "/cmd", "cmd.nodes": "/cmd/nodes", "cmd.domains": "/cmd/domains", "cmd.ceph": "/cmd/ceph", "node": "/nodes", "domain": "/domains", "network": "/networks", "storage": "/ceph", "storage.util": "/ceph/util", "osd": "/ceph/osds", "pool": "/ceph/pools", "volume": "/ceph/volumes", "snapshot": "/ceph/snapshots"}, "node": {"keepalive": "/keepalive", "mode": "/daemonmode", "data.static": "/staticdata", "counts.provisioned_domains": "/domainscount", "counts.running_domains": "/runningdomains", "counts.networks": "/networkscount", "state.daemon": "/daemonstate", "state.router": "/routerstate", "state.domain": "/domainstate", "vcpu.allocated": "/vcpualloc", "memory.total": "/memtotal", "memory.used": "/memused", "memory.free": "/memfree", "memory.allocated": "/memalloc", "memory.provisioned": "/memprov", "ipmi.hostname": "/ipmihostname", "ipmi.username": "/ipmiusername", "ipmi.password": "/ipmipassword"}, "domain": {"name": "", "xml": "/xml", "state": "/state", "profile": "/profile", "stats": "/stats", "node": "/node", "last_node": "/lastnode", "failed_reason": "/failedreason", "console.log": "/consolelog", "console.vnc": "/vnc", "meta.autostart": "/node_autostart", "meta.migrate_method": "/migration_method", "meta.node_selector": "/node_selector", "meta.node_limit": "/node_limit"}, "network": {"type": "/nettype", "rules": "/firewall_rules", "nameservers": "/name_servers", "domain": "/domain", "ip4.gateway": "/ip4_gateway", "ip4.network": "/ip4_network", "ip4.dhcp": "/dhcp4_flag", "ip4.reservations": "/dhcp4_reservations", "ip4.dhcp_start": "/dhcp4_start", "ip4.dhcp_end": "/dhcp4_end", "ip6.gateway": "/ip6_gateway", "ip6.network": "/ip6_network", "ip6.dhcp": "/dhcp6_flag"}, "osd": {"node": "/node", "device": "/device", "stats": "/stats"}, "pool": {"pgs": "/pgs", "stats": "/stats"}, "volume": {"stats": "/stats"}, "snapshot": {"stats": "/stats"}}

@@ -401,17 +401,13 @@ class ZKSchema(object):
         'node': {
             'keepalive': '/keepalive',
             'mode': '/daemonmode',
-            'staticdata': '/static_data',
-            'data.kernel': '/oskernelversion',
-            'data.os': '/ostype',
-            'data.arch': '/osarch',
+            'data.static': '/staticdata',
             'counts.provisioned_domains': '/domainscount',
             'counts.running_domains': '/runningdomains',
             'counts.networks': '/networkscount',
             'state.daemon': '/daemonstate',
             'state.router': '/routerstate',
             'state.domain': '/domainstate',
-            'vcpu.total': '/vcputotal',
             'vcpu.allocated': '/vcpualloc',
             'memory.total': '/memtotal',
             'memory.used': '/memused',
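
This hunk renames the node static-data key: the schema key changes from 'staticdata' to 'data.static' and its backing path from /static_data to /staticdata, while the separate 'data.kernel', 'data.os', 'data.arch' and 'vcpu.total' keys are dropped. On a cluster laid out with the old schema, the reworked validate() shown below would report the new path as missing and apply() would create it empty. A hypothetical before/after lookup against the two mappings in the JSON diff above:

    # Illustrative only: same logical datum, new key name and new path
    old_node = {'staticdata': '/static_data'}
    new_node = {'data.static': '/staticdata'}

    old_node['staticdata']   # '/static_data'
    new_node['data.static']  # '/staticdata'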

@@ -571,20 +567,120 @@ class ZKSchema(object):
         return current_version

     # Validate an active schema against a Zookeeper cluster
-    def validate(self, zkhandler, path='base'):
-        for key in self.keys(path):
-            if not zkhandler.exists(self.path(path + '.' + key)):
-                print(f"Key not found: {self.path(key)}")
-                return False
-        return True
+    def validate(self, zkhandler, logger=None):
+        result = True
+
+        # Walk the entire tree checking our schema
+        for elem in ['base']:
+            for key in self.keys(elem):
+                kpath = f'{elem}.{key}'
+                if not zkhandler.exists(self.path(kpath)):
+                    if logger is not None:
+                        logger.out(f'Key not found: {self.path(kpath)}', state='w')
+                    result = False
+
+        for elem in ['node', 'domain', 'network', 'osd', 'pool']:
+            # First read all the subelements of the key class
+            for child in zkhandler.children(self.path(f'base.{elem}')):
+                # For each key in the schema for that particular elem
+                for ikey in self.keys(elem):
+                    kpath = f'{elem}.{ikey}'
+                    # Validate that the key exists for that child
+                    if not zkhandler.exists(self.path(kpath, child)):
+                        if logger is not None:
+                            logger.out(f'Key not found: {self.path(kpath, child)}', state='w')
+                        result = False
+
+        # These two have several children layers that must be parsed through
+        for elem in ['volume']:
+            # First read all the subelements of the key class (pool layer)
+            for pchild in zkhandler.children(self.path(f'base.{elem}')):
+                # Finally read all the subelements of the key class (volume layer)
+                for vchild in zkhandler.children(self.path(f'base.{elem}') + f'/{pchild}'):
+                    child = f'{pchild}/{vchild}'
+                    # For each key in the schema for that particular elem
+                    for ikey in self.keys(elem):
+                        kpath = f'{elem}.{ikey}'
+                        # Validate that the key exists for that child
+                        if not zkhandler.exists(self.path(kpath, child)):
+                            if logger is not None:
+                                logger.out(f'Key not found: {self.path(kpath, child)}', state='w')
+                            result = False
+
+        for elem in ['snapshot']:
+            # First read all the subelements of the key class (pool layer)
+            for pchild in zkhandler.children(self.path(f'base.{elem}')):
+                # Next read all the subelements of the key class (volume layer)
+                for vchild in zkhandler.children(self.path(f'base.{elem}') + f'/{pchild}'):
+                    # Finally read all the subelements of the key class (volume layer)
+                    for schild in zkhandler.children(self.path(f'base.{elem}') + f'/{pchild}/{vchild}'):
+                        child = f'{pchild}/{vchild}/{schild}'
+                        # For each key in the schema for that particular elem
+                        for ikey in self.keys(elem):
+                            kpath = f'{elem}.{ikey}'
+                            # Validate that the key exists for that child
+                            if not zkhandler.exists(self.path(kpath, child)):
+                                if logger is not None:
+                                    logger.out(f'Key not found: {self.path(kpath, child)}', state='w')
+                                result = False
+
+        return result

     # Apply the current schema to the cluster
-    def apply(self, zkhandler, path='base'):
-        for key in self.keys(path):
-            if not zkhandler.exists(self.path(path + '.' + key)):
-                zkhandler.write([
-                    (self.path(path + '.' + key), '')
-                ])
+    def apply(self, zkhandler):
+        # Walk the entire tree checking our schema
+        for elem in ['base']:
+            for key in self.keys(elem):
+                kpath = f'{elem}.{key}'
+                if not zkhandler.exists(self.path(kpath)):
+                    zkhandler.write([
+                        (self.path(kpath), '')
+                    ])
+
+        for elem in ['node', 'domain', 'network', 'osd', 'pool']:
+            # First read all the subelements of the key class
+            for child in zkhandler.children(self.path(f'base.{elem}')):
+                # For each key in the schema for that particular elem
+                for ikey in self.keys(elem):
+                    kpath = f'{elem}.{ikey}'
+                    # Validate that the key exists for that child
+                    if not zkhandler.exists(self.path(kpath, child)):
+                        zkhandler.write([
+                            (self.path(kpath), '')
+                        ])
+
+        # These two have several children layers that must be parsed through
+        for elem in ['volume']:
+            # First read all the subelements of the key class (pool layer)
+            for pchild in zkhandler.children(self.path(f'base.{elem}')):
+                # Finally read all the subelements of the key class (volume layer)
+                for vchild in zkhandler.children(self.path(f'base.{elem}') + f'/{pchild}'):
+                    child = f'{pchild}/{vchild}'
+                    # For each key in the schema for that particular elem
+                    for ikey in self.keys(elem):
+                        kpath = f'{elem}.{ikey}'
+                        # Validate that the key exists for that child
+                        if not zkhandler.exists(self.path(kpath, child)):
+                            zkhandler.write([
+                                (self.path(kpath), '')
+                            ])
+
+        for elem in ['snapshot']:
+            # First read all the subelements of the key class (pool layer)
+            for pchild in zkhandler.children(self.path(f'base.{elem}')):
+                # Next read all the subelements of the key class (volume layer)
+                for vchild in zkhandler.children(self.path(f'base.{elem}') + f'/{pchild}'):
+                    # Finally read all the subelements of the key class (volume layer)
+                    for schild in zkhandler.children(self.path(f'base.{elem}') + f'/{pchild}/{vchild}'):
+                        child = f'{pchild}/{vchild}/{schild}'
+                        # For each key in the schema for that particular elem
+                        for ikey in self.keys(elem):
+                            kpath = f'{elem}.{ikey}'
+                            # Validate that the key exists for that child
+                            if not zkhandler.exists(self.path(kpath, child)):
+                                zkhandler.write([
+                                    (self.path(kpath), '')
+                                ])

         zkhandler.write([
             (self.path('base.schema.version'), self.version)
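
The rewritten pair replaces the old flat, single-level walk: validate() now descends through every key class (including the pool/volume and pool/volume/snapshot child layers), reports each missing key through an optional logger, and returns a single boolean, while apply() creates any missing key with an empty value and finally rewrites the base.schema.version key. A minimal driver sketch, assuming a connected zkhandler, a logger exposing the out(message, state=...) interface used above, and a no-argument ZKSchema constructor (none of these call sites appear in this commit):

    # Illustrative only -- hypothetical startup-time check-and-repair
    schema = ZKSchema()
    if not schema.validate(zkhandler, logger=logger):
        # Create whatever validate() reported as missing, then bump the
        # stored schema version to this schema's version.
        schema.apply(zkhandler)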

@@ -669,14 +765,14 @@ class ZKSchema(object):
             diff_keys = set_a ^ set_b

             for item in diff_keys:
-                elem_item = elem + '.' + item
+                elem_item = f'{elem}.{item}'
                 if item not in schema_a.keys(elem) and item in schema_b.keys(elem):
                     diff_add[elem_item] = schema_b.path(elem_item)
                 if item in schema_a.keys(elem) and item not in schema_b.keys(elem):
                     diff_remove[elem_item] = schema_a.path(elem_item)

             for item in set_b:
-                elem_item = elem + '.' + item
+                elem_item = f'{elem}.{item}'
                 if schema_a.path(elem_item) is not None and \
                         schema_b.path(elem_item) is not None and \
                         schema_a.path(elem_item) != schema_b.path(elem_item):
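
The f-string conversion here is purely cosmetic; the surrounding logic is unchanged. Keys present in exactly one of the two schemas come out of the symmetric difference set_a ^ set_b and are sorted into diff_add or diff_remove, while the second loop catches keys present in both schemas whose paths differ. A small standalone illustration of that classification, using plain sets in place of the schema objects:

    # Illustrative only: mimic the diff_add / diff_remove split
    keys_a = {'staticdata', 'keepalive'}   # e.g. old node keys
    keys_b = {'data.static', 'keepalive'}  # e.g. new node keys

    diff_add = [k for k in keys_a ^ keys_b if k in keys_b]     # ['data.static']
    diff_remove = [k for k in keys_a ^ keys_b if k in keys_a]  # ['staticdata']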