Merge branch 'provisioner' into 'master'

Implement Provisioner daemon

Closes #56 and #22

See merge request parallelvirtualcluster/pvc!1

Commit: 45dbc0eef8
@@ -27,5 +27,5 @@ for HOST in ${HOSTS[@]}; do
     ssh $HOST $SUDO dpkg -i /tmp/pvc/*.deb
     ssh $HOST $SUDO systemctl restart pvcd
     ssh $HOST rm -rf /tmp/pvc
-    sleep 15
+    sleep 30
 done

@@ -516,14 +516,14 @@ def net_list(limit=None):
     pvc_common.stopZKConnection(zk_conn)
     return flask.jsonify(retdata), retcode

-def net_add(vni, description, nettype, domain,
+def net_add(vni, description, nettype, domain, name_servers,
             ip4_network, ip4_gateway, ip6_network, ip6_gateway,
             dhcp4_flag, dhcp4_start, dhcp4_end):
     """
     Add a virtual client network to the PVC cluster.
     """
     zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_network.add_network(zk_conn, vni, description, nettype, domain,
+    retflag, retdata = pvc_network.add_network(zk_conn, vni, description, nettype, domain, name_servers,
                                                ip4_network, ip4_gateway, ip6_network, ip6_gateway,
                                                dhcp4_flag, dhcp4_start, dhcp4_end)
     if retflag:

@@ -537,7 +537,7 @@ def net_add(vni, description, nettype, domain,
     }
     return flask.jsonify(output), retcode

-def net_modify(vni, description, domain,
+def net_modify(vni, description, domain, name_servers,
                ip4_network, ip4_gateway,
                ip6_network, ip6_gateway,
                dhcp4_flag, dhcp4_start, dhcp4_end):

@@ -545,7 +545,7 @@ def net_modify(vni, description, domain,
     Modify a virtual client network in the PVC cluster.
     """
     zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_network.modify_network(zk_conn, vni, description, domain,
+    retflag, retdata = pvc_network.modify_network(zk_conn, vni, description, domain, name_servers,
                                                   ip4_network, ip4_gateway, ip6_network, ip6_gateway,
                                                   dhcp4_flag, dhcp4_start, dhcp4_end)
     if retflag:

@@ -413,6 +413,12 @@ def api_net_root():
     else:
         domain = None

+    # Get network name servers
+    if 'name_server' in flask.request.values:
+        name_servers = flask.request.values.getlist('name_server')
+    else:
+        name_servers = None
+
     # Get ipv4 network
     if 'ip4_network' in flask.request.values:
         ip4_network = flask.request.values['ip4_network']

@@ -455,7 +461,7 @@ def api_net_root():
     else:
         dhcp4_end = None

-    return pvcapi.net_add(vni, description, nettype, domain,
+    return pvcapi.net_add(vni, description, nettype, domain, name_servers,
                           ip4_network, ip4_gateway, ip6_network, ip6_gateway,
                           dhcp4_flag, dhcp4_start, dhcp4_end)

@@ -479,6 +485,12 @@ def api_net_element(network):
     else:
         domain = None

+    # Get network name servers
+    if 'name_server' in flask.request.values:
+        name_servers = flask.request.values.getlist('name_server')
+    else:
+        name_servers = None
+
     # Get ipv4 network
     if 'ip4_network' in flask.request.values:
         ip4_network = flask.request.values['ip4_network']

@@ -521,7 +533,7 @@ def api_net_element(network):
     else:
         dhcp4_end = None

-    return pvcapi.net_modify(network, description, domain,
+    return pvcapi.net_modify(network, description, domain, name_servers,
                              ip4_network, ip4_gateway,
                              ip6_network, ip6_gateway,
                              dhcp4_flag, dhcp4_start, dhcp4_end)

@@ -727,6 +727,11 @@ def cli_network():
     default=None,
     help='Domain name of the network.'
 )
+@click.option(
+    '--dns-server', 'name_servers',
+    multiple=True,
+    help='DNS nameserver for network; multiple entries may be specified.'
+)
 @click.option(
     '-i', '--ipnet', 'ip_network',
     default=None,

@@ -766,7 +771,7 @@ def cli_network():
 @click.argument(
     'vni'
 )
-def net_add(vni, description, nettype, domain, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end):
+def net_add(vni, description, nettype, domain, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end, name_servers):
     """
     Add a new virtual network with VXLAN identifier VNI to the cluster.

@@ -788,7 +793,7 @@ def net_add(vni, description, nettype, domain, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end):
         exit(1)

     zk_conn = pvc_common.startZKConnection(zk_host)
-    retcode, retmsg = pvc_network.add_network(zk_conn, vni, description, nettype, domain, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end)
+    retcode, retmsg = pvc_network.add_network(zk_conn, vni, description, nettype, domain, name_servers, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end)
     cleanup(retcode, retmsg, zk_conn)

 ###############################################################################

@@ -805,6 +810,11 @@ def net_add(vni, description, nettype, domain, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end):
     default=None,
     help='Domain name of the network.'
 )
+@click.option(
+    '--dns-server', 'name_servers',
+    multiple=True,
+    help='DNS nameserver for network; multiple entries may be specified (will overwrite all previous entries).'
+)
 @click.option(
     '-i', '--ipnet', 'ip4_network',
     default=None,

@@ -844,7 +854,7 @@ def net_add(vni, description, nettype, domain, ip_network, ip_gateway, ip6_network, ip6_gateway, dhcp_flag, dhcp_start, dhcp_end):
 @click.argument(
     'vni'
 )
-def net_modify(vni, description, domain, ip6_network, ip6_gateway, ip4_network, ip4_gateway, dhcp_flag, dhcp_start, dhcp_end):
+def net_modify(vni, description, domain, name_servers, ip6_network, ip6_gateway, ip4_network, ip4_gateway, dhcp_flag, dhcp_start, dhcp_end):
     """
     Modify details of virtual network VNI. All fields optional; only specified fields will be updated.

@@ -853,7 +863,7 @@ def net_modify(vni, description, domain, ip6_network, ip6_gateway, ip4_network, ip4_gateway, dhcp_flag, dhcp_start, dhcp_end):
     """

     zk_conn = pvc_common.startZKConnection(zk_host)
-    retcode, retmsg = pvc_network.modify_network(zk_conn, vni, description=description, domain=domain, ip6_network=ip6_network, ip6_gateway=ip6_gateway, ip4_network=ip4_network, ip4_gateway=ip4_gateway, dhcp_flag=dhcp_flag, dhcp_start=dhcp_start, dhcp_end=dhcp_end)
+    retcode, retmsg = pvc_network.modify_network(zk_conn, vni, description=description, domain=domain, name_servers=name_servers, ip6_network=ip6_network, ip6_gateway=ip6_gateway, ip4_network=ip4_network, ip4_gateway=ip4_gateway, dhcp_flag=dhcp_flag, dhcp_start=dhcp_start, dhcp_end=dhcp_end)
     cleanup(retcode, retmsg, zk_conn)

 ###############################################################################

@@ -182,6 +182,11 @@ def getInformationFromXML(zk_conn, uuid):
     if not domain_node_autostart:
         domain_node_autostart = 'False'

+    try:
+        domain_profile = zkhandler.readdata(zk_conn, '/domains/{}/profile'.format(uuid))
+    except:
+        domain_profile = None
+
     parsed_xml = getDomainXML(zk_conn, uuid)

     domain_uuid, domain_name, domain_description, domain_memory, domain_vcpu, domain_vcputopo = getDomainMainDetails(parsed_xml)

@@ -210,6 +215,7 @@ def getInformationFromXML(zk_conn, uuid):
         'node_selector': domain_node_selector,
         'node_autostart': domain_node_autostart,
         'description': domain_description,
+        'profile': domain_profile,
         'memory': domain_memory,
         'vcpu': domain_vcpu,
         'vcpu_topology': domain_vcputopo,

@@ -134,6 +134,7 @@ def getNetworkInformation(zk_conn, vni):
     description = zkhandler.readdata(zk_conn, '/networks/{}'.format(vni))
     nettype = zkhandler.readdata(zk_conn, '/networks/{}/nettype'.format(vni))
     domain = zkhandler.readdata(zk_conn, '/networks/{}/domain'.format(vni))
+    name_servers = zkhandler.readdata(zk_conn, '/networks/{}/name_servers'.format(vni))
     ip6_network = zkhandler.readdata(zk_conn, '/networks/{}/ip6_network'.format(vni))
     ip6_gateway = zkhandler.readdata(zk_conn, '/networks/{}/ip6_gateway'.format(vni))
     dhcp6_flag = zkhandler.readdata(zk_conn, '/networks/{}/dhcp6_flag'.format(vni))

@@ -149,6 +150,7 @@ def getNetworkInformation(zk_conn, vni):
         'description': description,
         'type': nettype,
         'domain': domain,
+        'name_servers': name_servers.split(','),
         'ip6': {
             'network': ip6_network,
             'gateway': ip6_gateway,

@@ -223,7 +225,7 @@ def isValidIP(ipaddr):
 # Direct functions
 #
 def add_network(zk_conn, vni, description, nettype,
-                domain, ip4_network, ip4_gateway, ip6_network, ip6_gateway,
+                domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway,
                 dhcp4_flag, dhcp4_start, dhcp4_end):
     # Ensure start and end DHCP ranges are set if the flag is set
     if dhcp4_flag and ( not dhcp4_start or not dhcp4_end ):

@@ -254,6 +256,7 @@ def add_network(zk_conn, vni, description, nettype,
         '/networks/{}'.format(vni): description,
         '/networks/{}/nettype'.format(vni): nettype,
         '/networks/{}/domain'.format(vni): domain,
+        '/networks/{}/name_servers'.format(vni): ','.join(name_servers),
         '/networks/{}/ip6_network'.format(vni): ip6_network,
         '/networks/{}/ip6_gateway'.format(vni): ip6_gateway,
         '/networks/{}/dhcp6_flag'.format(vni): dhcp6_flag,

@@ -278,6 +281,8 @@ def modify_network(zk_conn, vni, **parameters):
         zk_data.update({'/networks/{}'.format(vni): parameters['description']})
     if parameters['domain']:
         zk_data.update({'/networks/{}/domain'.format(vni): parameters['domain']})
+    if parameters['name_servers']:
+        zk_data.update({'/networks/{}/name_servers'.format(vni): ','.join(parameters['name_servers'])})
     if parameters['ip4_network']:
         zk_data.update({'/networks/{}/ip4_network'.format(vni): parameters['ip4_network']})
     if parameters['ip4_gateway']:

@@ -644,6 +649,7 @@ def format_info(network_information, long_output):
     ainformation.append('{}Description:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['description']))
     if network_information['type'] == 'managed':
         ainformation.append('{}Domain:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['domain']))
+        ainformation.append('{}DNS Servers:{} {}'.format(ansiprint.purple(), ansiprint.end(), ', '.join(network_information['name_servers'])))
     if network_information['ip6']['network'] != "None":
         ainformation.append('')
         ainformation.append('{}IPv6 network:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['ip6']['network']))

@@ -157,7 +157,7 @@ def flush_locks(zk_conn, domain):

     return success, message

-def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node_autostart):
+def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node_autostart, profile=None):
     # Parse the XML data
     try:
         parsed_xml = lxml.objectify.fromstring(config_data)

@@ -166,6 +166,10 @@ def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node_autostart):
     dom_uuid = parsed_xml.uuid.text
     dom_name = parsed_xml.name.text

+    # Ensure that the UUID and name are unique
+    if searchClusterByUUID(zk_conn, dom_uuid) or searchClusterByName(zk_conn, dom_name):
+        return False, 'ERROR: Specified VM "{}" or UUID "{}" matches an existing VM on the cluster'.format(dom_name, dom_uuid)
+
     if not target_node:
         target_node = common.findTargetNode(zk_conn, dom_uuid)
     else:

@@ -187,12 +191,13 @@ def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node_autostart):
         '/domains/{}/state'.format(dom_uuid): 'stop',
         '/domains/{}/node'.format(dom_uuid): target_node,
         '/domains/{}/lastnode'.format(dom_uuid): '',
-        '/domains/{}/node_limit'.format(dom_uuid): node_limit,
+        '/domains/{}/node_limit'.format(dom_uuid): ','.join(node_limit),
         '/domains/{}/node_selector'.format(dom_uuid): node_selector,
         '/domains/{}/node_autostart'.format(dom_uuid): node_autostart,
         '/domains/{}/failedreason'.format(dom_uuid): '',
         '/domains/{}/consolelog'.format(dom_uuid): '',
         '/domains/{}/rbdlist'.format(dom_uuid): ','.join(rbd_list),
+        '/domains/{}/profile'.format(dom_uuid): profile,
         '/domains/{}/xml'.format(dom_uuid): config_data
     })

@@ -205,7 +210,7 @@ def modify_vm_metadata(zk_conn, domain, node_limit, node_selector, node_autostart):

     if node_limit is not None:
         zkhandler.writedata(zk_conn, {
-            '/domains/{}/node_limit'.format(dom_uuid): node_limit
+            '/domains/{}/node_limit'.format(dom_uuid): ','.join(node_limit)
         })

     if node_selector is not None:

@@ -688,6 +693,7 @@ def format_info(zk_conn, domain_information, long_output):
     ainformation.append('{}UUID:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['uuid']))
     ainformation.append('{}Name:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['name']))
     ainformation.append('{}Description:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['description']))
+    ainformation.append('{}Profile:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['profile']))
     ainformation.append('{}Memory (M):{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['memory']))
     ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu']))
     ainformation.append('{}Topology (S/C/T):{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu_topology']))

@@ -715,6 +721,8 @@ def format_info(zk_conn, domain_information, long_output):
     }
     ainformation.append('{}State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), dstate_colour[domain_information['state']], domain_information['state'], ansiprint.end()))
     ainformation.append('{}Current Node:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['node']))
+    if not domain_information['last_node']:
+        domain_information['last_node'] = "N/A"
     ainformation.append('{}Previous Node:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['last_node']))

     # Get a failure reason if applicable

@@ -722,9 +730,8 @@ def format_info(zk_conn, domain_information, long_output):
         ainformation.append('')
         ainformation.append('{}Failure reason:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['failed_reason']))

-    ainformation.append('')
     ainformation.append('{}Migration selector:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['node_selector']))
-    ainformation.append('{}Node limit:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['node_limit']))
+    ainformation.append('{}Node limit:{} {}'.format(ansiprint.purple(), ansiprint.end(), ', '.join(domain_information['node_limit'])))
     ainformation.append('{}Autostart:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['node_autostart']))

     # Network list

@@ -0,0 +1 @@
../client-common

@@ -0,0 +1,232 @@
#!/usr/bin/env python3

# debootstrap_script.py - PVC Provisioner example script for Debootstrap
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will install
# a Debian system, of the release specified in the keyword argument `deb_release`
# and from the mirror specified in the keyword argument `deb_mirror`, and
# including the packages specified in the keyword argument `deb_packages` (a list
# of strings, which is then joined together as a CSV and passed to debootstrap),
# to the configured disks, configure fstab, and install GRUB. Any later config
# should be done within the VM, for instance via cloud-init.

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.

# This script will run under root privileges as the provisioner does. Be careful
# with that.

import os

# Installation function - performs a debootstrap install of a Debian system
# Note that the only arguments are keyword arguments.
def install(**kwargs):
    # The provisioner has already mounted the disks on kwargs['temporary_directory']
    # by this point, so we can get right to running the debootstrap after setting
    # some nicer variable names; you don't necessarily have to do this.
    vm_name = kwargs['vm_name']
    temporary_directory = kwargs['temporary_directory']
    disks = kwargs['disks']
    networks = kwargs['networks']
    # Our own required arguments. We should, though are not required to, handle
    # failures of these gracefully, should administrators forget to specify them.
    try:
        deb_release = kwargs['deb_release']
    except:
        deb_release = "stable"
    try:
        deb_mirror = kwargs['deb_mirror']
    except:
        deb_mirror = "http://ftp.debian.org/debian"
    try:
        deb_packages = kwargs['deb_packages'].split(',')
    except:
        deb_packages = ["linux-image-amd64", "grub-pc", "cloud-init", "python3-cffi-backend"]

    # We need to know our root disk
    root_disk = None
    for disk in disks:
        if disk['mountpoint'] == '/':
            root_disk = disk
    if not root_disk:
        return

    # Ensure we have debootstrap installed on the provisioner system; this is a
    # good idea to include if you plan to use anything that is not part of the
    # base Debian host system, just in case the provisioner host is not properly
    # configured already.
    os.system(
        "apt-get install -y debootstrap"
    )

    # Perform a debootstrap installation
    os.system(
        "debootstrap --include={pkgs} {suite} {target} {mirror}".format(
            suite=deb_release,
            target=temporary_directory,
            mirror=deb_mirror,
            pkgs=','.join(deb_packages)
        )
    )

    # Bind mount the devfs
    os.system(
        "mount --bind /dev {}/dev".format(
            temporary_directory
        )
    )

    # Create an fstab entry for each disk
    fstab_file = "{}/etc/fstab".format(temporary_directory)
    for disk in disks:
        # We assume SSD-based/-like storage, and dislike atimes
        options = "defaults,discard,noatime,nodiratime"

        # The root and var volumes have specific values
        if disk['mountpoint'] == "/":
            dump = 0
            cpass = 1
        elif disk['mountpoint'] == '/var':
            dump = 0
            cpass = 2
        else:
            dump = 0
            cpass = 0

        # Append the fstab line
        with open(fstab_file, 'a') as fh:
            data = "/dev/{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                disk=disk['disk_id'],
                mountpoint=disk['mountpoint'],
                filesystem=disk['filesystem'],
                options=options,
                dump=dump,
                cpass=cpass
            )
            fh.write(data)

    # Write the hostname
    hostname_file = "{}/etc/hostname".format(temporary_directory)
    with open(hostname_file, 'w') as fh:
        fh.write("{}".format(vm_name))

    # Fix the cloud-init.target since it's broken
    cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(temporary_directory)
    with open(cloudinit_target_file, 'w') as fh:
        data = """[Install]
WantedBy=multi-user.target
[Unit]
Description=Cloud-init target
After=multi-user.target
"""
        fh.write(data)

    # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
    # will always be on PCI bus ID 2, hence the name "ens2".
    # Write a DHCP stanza for ens2
    ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temporary_directory)
    with open(ens2_network_file, 'w') as fh:
        data = """auto ens2
iface ens2 inet dhcp
"""
        fh.write(data)

    # Write the DHCP config for ens2
    dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
    with open(dhclient_file, 'w') as fh:
        data = """# DHCP client configuration
# Created by vminstall for host web1.i.bonilan.net
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
send host-name = "web1";
send fqdn.fqdn = "web1";
request subnet-mask, broadcast-address, time-offset, routers,
domain-name, domain-name-servers, domain-search, host-name,
dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
netbios-name-servers, netbios-scope, interface-mtu,
rfc3442-classless-static-routes, ntp-servers;
}
"""
        fh.write(data)

    # Write the GRUB configuration
    grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
    with open(grubcfg_file, 'w') as fh:
        data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(root_disk=root_disk['disk_id'])
        fh.write(data)

    # Chroot, do some in-root tasks, then exit the chroot
    # EXITING THE CHROOT IS VERY IMPORTANT OR THE FOLLOWING STAGES OF THE PROVISIONER
    # WILL FAIL IN UNEXPECTED WAYS! Keep this in mind when using chroot in your scripts.
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(temporary_directory)
    fake_root = os.open("/", os.O_RDONLY)
    os.fchdir(fake_root)

    # Install and update GRUB
    os.system(
        "grub-install --force /dev/rbd/{}/{}_{}".format(root_disk['pool'], vm_name, root_disk['disk_id'])
    )
    os.system(
        "update-grub"
    )
    # Set a really dumb root password [TEMPORARY]
    os.system(
        "echo root:test123 | chpasswd"
    )
    # Enable cloud-init target on (first) boot
    # NOTE: Your user-data should handle this and disable it once done, or things get messy.
    # That cloud-init won't run without this hack seems like a bug... but even the official
    # Debian cloud images are affected, so who knows.
    os.system(
        "systemctl enable cloud-init.target"
    )

    # Restore our original root/exit the chroot
    # EXITING THE CHROOT IS VERY IMPORTANT OR THE FOLLOWING STAGES OF THE PROVISIONER
    # WILL FAIL IN UNEXPECTED WAYS! Keep this in mind when using chroot in your scripts.
    os.fchdir(real_root)
    os.chroot(".")
    os.fchdir(real_root)
    os.close(fake_root)
    os.close(real_root)

    # Unmount the bound devfs
    os.system(
        "umount {}/dev".format(
            temporary_directory
        )
    )

    # Clean up file handles so paths can be unmounted
    del fake_root
    del real_root

    # Everything else is done via cloud-init user-data

@@ -0,0 +1,46 @@
#!/usr/bin/env python3

# dummy_script.py - PVC Provisioner example script for noop
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will do
# nothing and return back to the provisioner without taking any action, and
# expecting no special arguments.

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.

# This script will run under root privileges as the provisioner does. Be careful
# with that.

import os

# Installation function - performs no actions and simply returns
# Note that the only arguments are keyword arguments.
def install(**kwargs):
    # The provisioner has already mounted the disks on kwargs['temporary_directory']
    # by this point, so we could get right to work here after setting some nicer
    # variable names; you don't necessarily have to do this.
    vm_name = kwargs['vm_name']
    temporary_directory = kwargs['temporary_directory']
    disks = kwargs['disks']
    networks = kwargs['networks']
    # No operation - this script just returns
    pass

@@ -0,0 +1,16 @@
Content-Type: multipart/mixed; boundary="==BOUNDARY=="
MIME-Version: 1.0

--==BOUNDARY==
Content-Type: text/cloud-config; charset="us-ascii"

users:
  - blah

--==BOUNDARY==
Content-Type: text/x-shellscript; charset="us-ascii"

#!/bin/bash
echo "koz is koz" >> /etc/motd

--==BOUNDARY==--

@@ -0,0 +1,27 @@
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0

#cloud-config
# Example user-data file to set up an alternate /var/home, a first user and some SSH keys, and some packages
bootcmd:
  - "mv /home /var/"
  - "locale-gen"
package_update: true
packages:
  - openssh-server
  - sudo
users:
  - name: deploy
    gecos: Deploy User
    homedir: /var/home/deploy
    sudo: "ALL=(ALL) NOPASSWD: ALL"
    groups: adm, sudo
    lock_passwd: true
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBRBGPzlbh5xYD6k8DMZdPNEwemZzKSSpWGOuU72ehfN joshua@bonifacelabs.net 2017-04
runcmd:
  - "userdel debian"
  - "groupmod -g 200 deploy"
  - "usermod -u 200 deploy"
  - "systemctl disable cloud-init.target"
  - "reboot"

@@ -0,0 +1,138 @@
#!/usr/bin/env python3

# libvirt_schema.py - Libvirt schema elements
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# File header, containing default values for various non-device components
# Variables:
#  * vm_name
#  * vm_uuid
#  * vm_description
#  * vm_memory
#  * vm_vcpus
#  * vm_architecture
libvirt_header = """<domain type='kvm'>
  <name>{vm_name}</name>
  <uuid>{vm_uuid}</uuid>
  <description>{vm_description}</description>
  <memory unit='MiB'>{vm_memory}</memory>
  <vcpu>{vm_vcpus}</vcpu>
  <cpu>
    <topology sockets='1' cores='{vm_vcpus}' threads='1'/>
  </cpu>
  <os>
    <type arch='{vm_architecture}' machine='pc-i440fx-2.7'>hvm</type>
    <bootmenu enable='yes'/>
    <boot dev='cdrom'/>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
"""

# File footer, closing devices and domain elements
libvirt_footer = """  </devices>
</domain>"""

# Default devices for all VMs
devices_default = """    <emulator>/usr/bin/kvm</emulator>
    <controller type='usb' index='0'/>
    <controller type='pci' index='0' model='pci-root'/>
    <rng model='virtio'>
      <rate period="1000" bytes="2048"/>
      <backend model='random'>/dev/random</backend>
    </rng>
"""

# Serial device
# Variables:
#  * vm_name
devices_serial = """    <serial type='pty'>
      <log file='/var/log/libvirt/{vm_name}.log' append='on'/>
    </serial>
    <console type='pty'/>
"""

# VNC device
# Variables:
#  * vm_vncport
#  * vm_vnc_autoport
#  * vm_vnc_bind
devices_vnc = """    <graphics type='vnc' port='{vm_vncport}' autoport='{vm_vnc_autoport}' listen='{vm_vnc_bind}'/>
"""

# VirtIO SCSI device
devices_scsi_controller = """    <controller type='scsi' index='0' model='virtio-scsi'/>
"""

# Disk device header
# Variables:
#  * ceph_storage_secret
#  * disk_pool
#  * vm_name
#  * disk_id
devices_disk_header = """    <disk type='network' device='disk'>
      <driver name='qemu' discard='unmap'/>
      <target dev='{disk_id}' bus='scsi'/>
      <auth username='libvirt'>
        <secret type='ceph' uuid='{ceph_storage_secret}'/>
      </auth>
      <source protocol='rbd' name='{disk_pool}/{vm_name}_{disk_id}'>
"""

# Disk device coordinator element
# Variables:
#  * coordinator_name
#  * coordinator_ceph_mon_port
devices_disk_coordinator = """        <host name='{coordinator_name}' port='{coordinator_ceph_mon_port}'/>
"""

# Disk device footer
devices_disk_footer = """      </source>
    </disk>
"""

# vhostmd virtualization passthrough device
devices_vhostmd = """    <disk type='file' device='disk'>
      <driver name='qemu' type='raw'/>
      <source file='/dev/shm/vhostmd0'/>
      <target dev='sdz' bus='usb'/>
      <readonly/>
    </disk>
"""

# Network interface device
# Variables:
#  * eth_macaddr
#  * eth_bridge
devices_net_interface = """    <interface type='bridge'>
      <mac address='{eth_macaddr}'/>
      <source bridge='{eth_bridge}'/>
      <model type='virtio'/>
    </interface>
"""

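The schema strings above are plain `str.format()` building blocks. As a minimal sketch of how they might be concatenated into a full domain document (the values shown are hypothetical, and the actual assembly logic in the provisioner may differ):

```
# Illustrative sketch only (not part of this commit): assembling a domain
# document from the libvirt_schema building blocks defined above.
import libvirt_schema

vm_xml = libvirt_schema.libvirt_header.format(
    vm_name='web1',                                     # hypothetical values
    vm_uuid='00000000-0000-0000-0000-000000000000',
    vm_description='Example VM',
    vm_memory=2048,
    vm_vcpus=2,
    vm_architecture='x86_64',
)
vm_xml += libvirt_schema.devices_default
vm_xml += libvirt_schema.devices_scsi_controller
vm_xml += libvirt_schema.devices_serial.format(vm_name='web1')
vm_xml += libvirt_schema.devices_net_interface.format(
    eth_macaddr='52:54:00:00:01:10',
    eth_bridge='vmbr5927',
)
vm_xml += libvirt_schema.libvirt_footer
print(vm_xml)
```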
(File diff suppressed because it is too large)
@@ -0,0 +1,16 @@
# Parallel Virtual Cluster Provisioner client worker unit file

[Unit]
Description = Parallel Virtual Cluster Provisioner worker
After = network-online.target

[Service]
Type = simple
WorkingDirectory = /usr/share/pvc
Environment = PYTHONUNBUFFERED=true
Environment = PVC_CONFIG_FILE=/etc/pvc/pvc-provisioner.yaml
ExecStart = /usr/bin/celery worker -A pvc-provisioner.celery --concurrency 1 --loglevel INFO
Restart = on-failure

[Install]
WantedBy = multi-user.target

(File diff suppressed because it is too large)
@@ -0,0 +1,83 @@
---
# pvc-provisioner client configuration file example
#
# This configuration file specifies details for the PVC provisioner client
# running on this machine. Default values are not supported; the values in
# this sample configuration are considered defaults and can be used as-is.
#
# Copy this example to /etc/pvc/pvc-provisioner.yaml and edit to your needs.
#
# Alternatively, you may combine this configuration (anything under the
# `provisioner` section) with a PVC API configuration in a single file, and
# create links between them. By default, the only difference is the
# provisioner header and the listen port specifically.

pvc:
    # debug: Enable/disable API debug mode
    debug: True
    # coordinators: The list of cluster coordinator hostnames
    coordinators:
      - hv1
      - hv2
      - hv3
    # cluster: Information about the cluster
    cluster:
        # storage_hosts: The list of hosts that the Ceph monitors are valid on; if empty (the default),
        #                uses the list of coordinators
        storage_hosts:
          - ceph1
          - ceph2
          - ceph2
        # storage_domain: The storage domain name, concatenated with the coordinators list names
        #                 to form monitor access strings
        storage_domain: "s.bonilan.net"
        # ceph_monitor_port: The port that the Ceph monitor on each coordinator listens on
        ceph_monitor_port: 6789
        # ceph_storage_secret_uuid: Libvirt secret UUID for Ceph storage access
        ceph_storage_secret_uuid: "c416032b-2ce9-457f-a5c2-18704a3485f4"
    # provisioner: Configuration of the Provisioner API listener
    provisioner:
        # listen_address: IP address(es) to listen on; use 0.0.0.0 for all interfaces
        listen_address: "10.100.0.252"
        # listen_port: TCP port to listen on, usually 7375
        listen_port: "7375"
        # authentication: Authentication and security settings
        authentication:
            # enabled: Enable or disable authentication (True/False)
            enabled: False
            # secret_key: Per-cluster secret key for API cookies; generate with uuidgen or pwgen
            secret_key: ""
            # tokens: a list of authentication tokens; leave as an empty list to disable authentication
            tokens:
                # description: token description for management
              - description: "testing"
                # token: random token for authentication; generate with uuidgen or pwgen
                token: ""
        # ssl: SSL configuration
        ssl:
            # enabled: Enable or disable SSL operation (True/False)
            enabled: False
            # cert_file: SSL certificate file
            cert_file: ""
            # key_file: SSL certificate key file
            key_file: ""
        # database: Backend database configuration
        database:
            # host: PostgreSQL hostname, usually 'localhost'
            host: 10.100.0.252
            # port: PostgreSQL port, invariably '5432'
            port: 5432
            # name: PostgreSQL database name, invariably 'pvcprov'
            name: pvcprov
            # user: PostgreSQL username, invariably 'pvcprov'
            user: pvcprov
            # pass: PostgreSQL user password, randomly generated
            pass: pvcprov
        # queue: Celery backend queue using the PVC Redis instance
        queue:
            # host: Redis hostname, usually 'localhost'
            host: localhost
            # port: Redis port, invariably '6379'
            port: 6379
            # path: Redis queue path, invariably '/0'
            path: /0

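For reference, the `queue` settings above map onto a standard Redis broker URI of the form `redis://host:port/db`. A minimal sketch of how a worker might derive its Celery broker from this file (the exact wiring inside `pvc-provisioner.py` may differ) is:

```
# Illustrative sketch only: deriving the Celery broker URI from the
# queue section of /etc/pvc/pvc-provisioner.yaml.
import yaml
from celery import Celery

with open('/etc/pvc/pvc-provisioner.yaml') as fh:
    config = yaml.safe_load(fh)

queue = config['pvc']['provisioner']['queue']
broker = 'redis://{}:{}{}'.format(queue['host'], queue['port'], queue['path'])
celery = Celery('pvc-provisioner', broker=broker, backend=broker)
```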
@@ -0,0 +1,16 @@
# Parallel Virtual Cluster Provisioner client daemon unit file

[Unit]
Description = Parallel Virtual Cluster Provisioner client daemon
After = network-online.target

[Service]
Type = simple
WorkingDirectory = /usr/share/pvc
Environment = PYTHONUNBUFFERED=true
Environment = PVC_CONFIG_FILE=/etc/pvc/pvc-provisioner.yaml
ExecStart = /usr/share/pvc/pvc-provisioner.py
Restart = on-failure

[Install]
WantedBy = multi-user.target

@@ -0,0 +1,15 @@
create database pvcprov with owner = pvcprov connection limit = -1;
\c pvcprov
create table system_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, vcpu_count INT NOT NULL, vram_mb INT NOT NULL, serial BOOL NOT NULL, vnc BOOL NOT NULL, vnc_bind TEXT, node_limit TEXT, node_selector TEXT, start_with_node BOOL NOT NULL);
create table network_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, mac_template TEXT);
create table network (id SERIAL PRIMARY KEY, network_template INT REFERENCES network_template(id), vni INT NOT NULL);
create table storage_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE);
create table storage (id SERIAL PRIMARY KEY, storage_template INT REFERENCES storage_template(id), pool TEXT NOT NULL, disk_id TEXT NOT NULL, disk_size_gb INT NOT NULL, mountpoint TEXT, filesystem TEXT, filesystem_args TEXT);
create table userdata_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, userdata TEXT NOT NULL);
create table script (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, script TEXT NOT NULL);
create table profile (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, system_template INT REFERENCES system_template(id), network_template INT REFERENCES network_template(id), storage_template INT REFERENCES storage_template(id), userdata_template INT REFERENCES userdata_template(id), script INT REFERENCES script(id), arguments text);
grant all privileges on database pvcprov to pvcprov;
grant all privileges on all tables in schema public to pvcprov;
grant all privileges on all sequences in schema public to pvcprov;

insert into userdata_template(name, userdata) values ('empty', '');

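To illustrate how the `profile` table ties the other tables together, here is a minimal sketch of resolving a profile to its component template names; the query and the 'default' profile name are hypothetical, using the credentials from the sample configuration:

```
# Illustrative sketch only: resolving a profile to its template names.
import psycopg2

conn = psycopg2.connect(host='localhost', dbname='pvcprov',
                        user='pvcprov', password='pvcprov')
cur = conn.cursor()
cur.execute("""
    SELECT p.name, s.name, n.name, st.name
    FROM profile p
    JOIN system_template s ON p.system_template = s.id
    JOIN network_template n ON p.network_template = n.id
    JOIN storage_template st ON p.storage_template = st.id
    WHERE p.name = %s
""", ('default',))
print(cur.fetchone())
cur.close()
conn.close()
```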
@@ -26,7 +26,7 @@ Description: Parallel Virtual Cluster common client libraries (Python 3)
 Package: pvc-client-cli
 Architecture: all
 Depends: pvc-client-common, python3-yaml, python3-netifaces, python3-dialog
-Description: Parallel Virtual Cluster client (Python 3)
+Description: Parallel Virtual Cluster CLI client (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .
  This package installs the PVC command-line client

@@ -34,8 +34,16 @@ Description: Parallel Virtual Cluster client (Python 3)
 Package: pvc-client-api
 Architecture: all
 Depends: pvc-client-common, python3-yaml, python3-flask, python3-gevent
-Description: Parallel Virtual Cluster client (Python 3)
+Description: Parallel Virtual Cluster API client (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .
  This package installs the PVC API client daemon

+Package: pvc-client-provisioner
+Architecture: all
+Depends: pvc-client-common, python3-yaml, python3-flask, python3-celery, python-celery-common
+Description: Parallel Virtual Cluster Provisioner client (Python 3)
+ A KVM/Zookeeper/Ceph-based VM and private cloud manager
+ .
+ This package installs the PVC provisioner daemon
+

@@ -0,0 +1,6 @@
client-provisioner/pvc-provisioner.py usr/share/pvc
client-provisioner/pvc-provisioner.sample.yaml etc/pvc
client-provisioner/provisioner_lib usr/share/pvc
client-provisioner/pvc-provisioner.service lib/systemd/system
client-provisioner/pvc-provisioner-worker.service lib/systemd/system
client-provisioner/examples usr/share/pvc/provisioner

@@ -0,0 +1,15 @@
#!/bin/sh

# Install client binary to /usr/bin via symlink
ln -s /usr/share/pvc/provisioner.py /usr/bin/pvc-provisioner

# Reload systemd's view of the units
systemctl daemon-reload

# Restart the daemon (or warn on first install)
if systemctl is-active --quiet pvc-provisioner.service; then
    systemctl restart pvc-provisioner.service
    systemctl restart pvc-provisioner-worker.service
else
    echo "NOTE: The PVC provisioner API daemon (pvc-provisioner.service) has not been started; create a config file at /etc/pvc/pvc-provisioner.yaml then start it."
fi

@@ -0,0 +1,4 @@
#!/bin/sh

# Remove client binary symlink
rm -f /usr/bin/pvc-provisioner

@ -0,0 +1,305 @@
|
||||||
|
# PVC Provisioner API architecture
|
||||||
|
|
||||||
|
The PVC Provisioner API is a standalone client application for PVC. It interfaces directly with the Zookeeper database to manage state, and with the Patroni PostgreSQL database to store configuration details.
|
||||||
|
|
||||||
|
The Provisioner is built using Flask and is packaged in the Debian package `pvc-client-provisioner`. The Provisioner depends on the common client functions of the `pvc-client-common` package as does the CLI client.
|
||||||
|
|
||||||
|
Details of the Provisioner API interface can be found in [the manual](/manuals/provisioner).
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
|
||||||
|
The purpose of the Provisioner API is to provide a convenient way for administrators to automate the creation of new virtual machines on the PVC cluster.
|
||||||
|
|
||||||
|
The Provisioner allows the administrator to create "templates", a unified set of configurations, which VMs can then use. These templates configure the VM resources (memory, disk, metadata), VM networks, and VM disks separately, allowing the administrator to specify very granular and dynamic configurations for new virtual machines.
|
||||||
|
|
||||||
|
Upon triggering a new VM creation, the provisioner also has facilities to create new virtual machines in three main ways:
|
||||||
|
|
||||||
|
1. Via cloning an existing RBD disk image, then performing optional post-clone actions on the volume(s).
|
||||||
|
2. Via booting an installer ISO image, stored as an RBD disk image.
|
||||||
|
3. Via custom provisioning scripts provided by the administrator.
|
||||||
|
|
||||||
|
The first option allows administrators to quickly create new virtual machines based on an existing image, either uploaded by the administrator or created from an existing virtual machine.
|
||||||
|
|
||||||
|
The second option allows administrators to install arbitrary operating systems via ISO images, which are uploaded by the administrator. Usually, auto-configuring/kickstarted ISOs are ideal for this purpose.
|
||||||
|
|
||||||
|
The third method provides extreme flexibility in setting up Unix-based virtual machines, as standard, arbitrary Python scripts can be provided by the administrator, allowing the system to automatically install and configure the VM exactly to the specifications they want. Furthermore, PVC includes integrated support for `cloud-init` inside VMs, for maximum flexibility in post-install configurations.
|
||||||
|
|
||||||
|
## System Templates
|
||||||
|
|
||||||
|
The PVC Provisioner has three categories of templates to specify the resources allocated to the virtual machine. They are: System Templates, Network Templates, and Disk Templates.
|
||||||
|
|
||||||
|
### System Templates
|
||||||
|
|
||||||
|
System templates specify the basic resources of the virtual machine: vCPUs, memory, and configuration metadata (e.g. serial/VNC/Spice consoles, migration methods, additional devices, etc.). PVC VMs use the Libvirt XML configuration format, so these templates specify the required values in the created VM configuration file. When querying details, the API will return JSON representations of the configuration, which are used here for examples.
|
||||||
|
|
||||||
|
vCPU and memory configurations are specified explicitly. For instance, a template might be called `small_webserver` and specify 2 `vcpus` and 2GB (always specified in MB) of `memory`:
|
||||||
|
|
||||||
|
```
|
||||||
|
"small_webserver": {
|
||||||
|
"vcpus": 2,
|
||||||
|
"memory": 2048
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Additional, non-default configuration values can also be specified. For instance, one can specify the `console_type` and its additional values:

```
"serial_server": {
    "vcpus": 1,
    "memory": 1024,
    "console_type": "serial",
    "serial_device": "auto",
    "serial_logfile": "/var/log/libvirt/VMNAME.log"
}
```

The serial logfile can also be "auto"; this enables the PVC `vm log` functionality. The literal string `VMNAME` in this value will be replaced with the virtual machine name.
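
Conceptually, this substitution is a simple string replacement. The following is a minimal sketch of the behaviour described above; the function name is illustrative, not part of the PVC API:

```
# Illustrative sketch only: expand the literal VMNAME in a template value
def expand_vmname(template_value, vm_name):
    # "/var/log/libvirt/VMNAME.log" -> "/var/log/libvirt/web3.log" for VM "web3"
    return template_value.replace('VMNAME', vm_name)
```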

Configuration for a VNC console is similar:

```
"vnc_server": {
    "vcpus": 4,
    "memory": 4096,
    "console_type": "vnc",
    "vnc_port": "auto",
    "vnc_listen": "0.0.0.0"
}
```

Additional PVC metadata can be configured in these templates as well. For example:

```
"limited_server": {
    "vcpus": 1,
    "memory": 1024,
    "pvc_node_limit": "pvchv1,pvchv2",
    "pvc_node_selector": "vms",
    "pvc_node_autostart": "True"
}
```

### Network Templates

Network templates specify which PVC networks the virtual machine is active in, as well as the method used to calculate MAC addresses for VM interfaces. Networks are specified by their VNI ID or description within PVC.

For example, a system with a single interface and an autogenerated MAC address:

```
"single_net_auto": {
    "networks": [
        "client-net-01"
    ],
    "macaddr": "auto"
}
```

In some cases, it may be useful for the administrator to specify a static MAC address pattern for a set of VMs, for instance if they must get consistent DHCP reservations between rebuilds. The `macaddr` field can contain templated MAC address values, in the format `AA:AA:AA:XX:XX:YZ`. In this format, `A` represents the OUI portion (usually the KVM default of `52:54:00`), `X` represents a static prefix specified by the administrator, `Y` represents the VM number/ID, which is autofilled by PVC based on the VM name (or set to 0 for numberless VM names), and `Z` represents the incremental interface ID within the VM. Therefore, to configure a static MAC address, the template could be:

```
"double_net_templated_mac": {
    "networks": [
        "5927",
        "5928"
    ],
    "macaddr": "52:54:00:00:01:YZ"
}
```

Note the literal `Y` and `Z` characters in the value. This will expand to the following MAC addresses for a VM called `web3`, which would have VM number/ID `3`:

* Network `5927`: `52:54:00:00:01:30`
* Network `5928`: `52:54:00:00:01:31`

Similarly, a VM called `accounting`, which would have the implied VM number/ID `0`, would expand to:

* Network `5927`: `52:54:00:00:01:00`
* Network `5928`: `52:54:00:00:01:01`

Note that these automated values do not overflow; PVC therefore does not support templated MAC addresses for more than 9 numbered VMs (e.g. `web1` through `web9`) within a single template, or for more than 10 networks within each VM. For such cases, static MAC addresses are far less useful anyway, and the administrator must take this into account. Also note that assigning the same static MAC template to overlapping numbered VMs (e.g. `web1`-`web3` and `mail1`-`mail3`) will result in MAC address conflicts within a given client network, and must be avoided.
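
For clarity, the expansion behaviour described above can be sketched in a few lines of Python. This is an illustration of the documented format, not the actual PVC implementation:

```
import re

# Illustrative sketch only: expand a templated MAC per the documented format
def expand_mac_template(template, vm_name, interface_id):
    # The VM number/ID is the trailing numeral of the VM name, or 0 if none
    match = re.search(r'([0-9]+)$', vm_name)
    vm_id = match.group(1) if match else '0'
    # Y becomes the VM number/ID, Z the incremental interface ID
    return template.replace('Y', vm_id).replace('Z', str(interface_id))

# "web3", second interface -> 52:54:00:00:01:31
print(expand_mac_template('52:54:00:00:01:YZ', 'web3', 1))
```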

### Disk Templates

Disk templates specify the disk layout for the VM, including the filesystem and mountpoint for scripted deployments. Disks are specified by their virtual disk name within the VM, and sizes are always specified in GB. For a basic, unmanaged VM with a single disk, the template may be as simple as:

```
"single_disk": {
    "vda": {
        "size": 20
    }
}
```

For a scripted VM, two additional values should be specified: the filesystem, which must be a valid filesystem usable by the VM, and the mountpoint:

```
"scripted_single_disk": {
    "vda": {
        "size": 20,
        "filesystem": "ext4",
        "mountpoint": "/"
    }
}
```

Note that these values are technically optional: if unspecified, PVC will not create a filesystem on the device nor attempt to mount it during the scripted configuration steps. This allows administrators to attach unallocated block devices to scripted VMs, in addition to the main filesystem(s) that the OS will be installed on.

More complicated disk templates are also possible by specifying incrementing `vdX` devices in the VM, for example:

```
"scripted_multi_disk_srv": {
    "vda": {
        "size": 4,
        "filesystem": "ext4",
        "mountpoint": "/"
    },
    "vdb": {
        "size": 8,
        "filesystem": "ext4",
        "mountpoint": "/var"
    },
    "vdc": {
        "size": 40,
        "filesystem": "xfs",
        "mountpoint": "/srv"
    }
}
```

## System Definitions

At the next level above configuration templates, system definitions provide a way to group templates together into standard definitions for classes of virtual machines. A definition can then be specified, or autodetected, at VM creation instead of manually specifying the three resource templates; it also specifies additional provisioning metadata, including the install method and the provisioning script, if applicable.

It is generally a good idea to make use of system definitions, rather than manually specifying all values at install time, in order to reduce the possibility of administrative mistakes when provisioning new VMs. They are, however, optional: all the required configuration information may be specified explicitly by the administrator when creating a new VM, instead of using a definition.

The `autostart` option specifies whether PVC should automatically start the VM after the provisioning sequence completes. It defaults to "True"; setting it to "False" requires the administrator to start the VM manually using PVC commands afterwards.

For example, here are several VM definitions using some of the example templates above:

```
"webX": {
    "templates": {
        "system": "small_webserver",
        "network": "double_net_templated_mac",
        "disk": "scripted_single_disk"
    },
    "provisioner": {
        "method": "script",
        "script": {
            "name": "basic-pvc-debian",
            "arguments": {
                "keyword_argument": "my_value",
                "another_argument": "some_value"
            }
        }
    },
    "autostart": "False"
}
```

```
"windows-10": {
    "templates": {
        "system": "vnc_server",
        "network": "single_net_auto",
        "disk": "single_disk"
    },
    "provisioner": {
        "method": "iso",
        "iso": "installers/windows-10-installer-201910"
    }
}
```

```
"cloned_mail": {
    "templates": {
        "system": "limited_server",
        "network": "single_net_auto",
        "disk": "scripted_multi_disk_srv"
    },
    "provisioner": {
        "method": "clone",
        "clone": {
            "source_disks": {
                "vda": "templates/mailX_root",
                "vdb": "templates/mailX_var"
            }
        }
    }
}
```

### Scripted installs

Scripted installs specify the `script` `method` in their `provisioner` metadata section. The second value, named `script`, specifies the name of the provisioner script, which must exist, as well as any additional arguments the administrator may wish to pass to the script functions. Provisioner scripts are explained in detail in a subsequent section.

### ISO installs

ISO installs specify the `iso` `method` in their `provisioner` metadata section. The second value, named `iso`, specifies the RBD image containing the ISO, which must already exist, having been previously uploaded by the administrator. The VM is booted immediately after basic configuration, and control is passed to the ISO to perform any installation steps; no other configuration occurs from the PVC side.

### Clone installs

Clone installs specify the `clone` `method` in their `provisioner` metadata section. The second value, named `clone`, specifies the target virtual devices and their corresponding source RBD images, as well as the provisioner script to run after cloning.

Within the `clone` section, the `source_disks` section specifies the list of disks to clone and their target devices. These target devices must align with disks from the Disk template, to map the source volumes to the new volumes for the VM. For example, if the Disk template specifies `vda` as a disk with `mountpoint` `/` (the `size` and `filesystem` will be ignored), and the `source_disks` value for `vda` maps to the RBD image `templates/root`, the provisioner will clone the RBD image `templates/root` to a new volume for the VM named, for example, `vms/VMNAME_vda`. Any additional disks specified in the Disk template that are not listed in `source_disks` will be created as normal.

PVC performs no actions on a clone deployment aside from creating the additional disks mentioned above, if applicable. All configuration of the clone is the responsibility of the administrator. The cloud-init support from the `script` install method can be useful here, to create a "golden image" that then uses cloud-init to configure itself on first boot.
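
The mapping logic can be illustrated with a short sketch. The helper functions `clone_rbd_volume` and `create_rbd_volume` are hypothetical stand-ins for the provisioner's storage operations, not real PVC functions:

```
# Illustrative sketch only: map source_disks onto the Disk template
def provision_clone_disks(vm_name, disk_template, source_disks):
    for disk, spec in disk_template.items():
        target = 'vms/{}_{}'.format(vm_name, disk)
        if disk in source_disks:
            # Clone the source RBD image; size and filesystem are ignored
            clone_rbd_volume(source_disks[disk], target)
        else:
            # Disks not listed in source_disks are created as normal
            create_rbd_volume(target, size_gb=spec['size'])
```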

## Provisioning scripts

The PVC provisioner provides a scripting framework to automate VM installation. This is generally most useful with UNIX-like systems, which can be installed over the network via shell scripts. For instance, the script might install a Debian VM using `debootstrap`.

Provisioner scripts are written in Python 3 and are called in a standardized way during the provisioning sequence. A single function called `install` is called during the provisioning sequence to perform the OS installation, after which the system is booted.

The flow of the provisioning sequence is as follows (a simplified sketch of this flow appears after the list):

1. The provisioner creates the required disks.
1. The provisioner creates a temporary directory on the local system (often the primary hypervisor, but the provisioner may be run in a dedicated virtual machine).
1. The provisioner maps the VM's RBD volumes on the local system.
1. The provisioner mounts the RBD volumes at their `mountpoint` under the temporary directory, along with several temporary virtual filesystems bind-mounted from the local system.
1. The provisioner calls the `install` function of the provisioner script and waits for it to finish execution.
1. The provisioner creates any cloud-init configuration files specified.
1. The provisioner unmounts the RBD volumes and temporary virtual filesystems (cleanup).
1. The provisioner unmaps the RBD volumes from the local system (cleanup).
1. The provisioner defines the new VM in PVC and, optionally, starts it.
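
As a rough illustration of steps 2 through 8, the core of the sequence resembles the following sketch. This is not the daemon's actual code, and the disk dictionary keys (`disk_id`, `mountpoint`) are assumptions for the example:

```
# Illustrative sketch only: map, mount, install, then clean up
import os
import subprocess
import tempfile

def provision_sketch(vm_name, disks, install):
    temp_dir = tempfile.mkdtemp(prefix='pvc-provision-')
    mounted = []
    # Mount shallower mountpoints (e.g. /) before deeper ones (e.g. /var)
    for disk in sorted(disks, key=lambda d: d.get('mountpoint') or ''):
        volume = 'vms/{}_{}'.format(vm_name, disk['disk_id'])
        # rbd map prints the local device path, e.g. /dev/rbd0
        device = subprocess.check_output(['rbd', 'map', volume]).decode().strip()
        if disk.get('mountpoint'):
            target = temp_dir + disk['mountpoint']
            os.makedirs(target, exist_ok=True)
            subprocess.check_call(['mount', device, target])
            mounted.append(target)
    # Hand control to the provisioner script's install() function
    install(vm_name=vm_name, temporary_directory=temp_dir, disks=disks)
    # Cleanup: unmount deepest-first, then unmap all volumes
    for target in reversed(mounted):
        subprocess.check_call(['umount', target])
    for disk in disks:
        subprocess.check_call(['rbd', 'unmap', 'vms/{}_{}'.format(vm_name, disk['disk_id'])])
```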

*A WARNING*: It's important to remember that these provisioning scripts will run with the same privileges as the provisioner API daemon (usually root) on the system running the daemon. THIS MAY POSE A SECURITY RISK. However, the intent is that administrators of the cluster are the only ones allowed to specify these scripts, that they check them thoroughly when adding them to the system, and that they limit access to the provisioning API to trusted sources. If neither of these conditions can be met, for instance if arbitrary users must specify custom scripts without administrator oversight, then the PVC provisioner may not be ideal, and administrators are encouraged to implement their own custom provisioning engine.

### `install` function

The `install` function is the main entrypoint for a provisioning script, and is the only part of the script that is explicitly called. The provisioner calls this function after setting up the temporary install directory and mounting the volumes. The script can thus perform any tasks required to install the VM, and then finish.

This function is passed a number of keyword arguments that it can use during installation, described below, as well as any keyword arguments passed via the optional arguments of the script.

###### `vm_name`

The `vm_name` keyword argument contains the full name of the new VM.

###### `vm_id`

The `vm_id` keyword argument contains the VM identifier (the last numeral of the VM name, or `0` for a VM name that does not end in a numeral).

###### `temporary_directory`

The `temporary_directory` keyword argument contains the path to the temporary directory in which the new VM's disks are mounted. The function *must* perform any installation steps to/under this directory.

###### `disks`

The `disks` keyword argument contains a Python list of the configured disks, as dictionaries of values as specified in the Disk template. The function *may* use these values as appropriate, for instance to write an `/etc/fstab` file.

###### `networks`

The `networks` keyword argument contains a Python list of the configured networks, as dictionaries of values as specified in the Network template. The function *may* use these values as appropriate, for instance to write an `/etc/network/interfaces` file.
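
Putting these together, a minimal provisioning script might look like the following sketch, which debootstraps a basic Debian system into the target directory. It is illustrative only; the dictionary keys used for `disks` (`disk_id`, `mountpoint`, `filesystem`) are assumptions for the example:

```
#!/usr/bin/env python3
# Illustrative provisioner script sketch; requires debootstrap on the
# system running the provisioner.
import os

def install(**kwargs):
    vm_name = kwargs['vm_name']
    temp_dir = kwargs['temporary_directory']
    disks = kwargs['disks']

    # Install a base Debian system into the mounted target directory
    os.system('debootstrap --include=openssh-server,cloud-init buster {} http://deb.debian.org/debian'.format(temp_dir))

    # Write an fstab based on the Disk template values
    with open('{}/etc/fstab'.format(temp_dir), 'w') as fh:
        for disk in disks:
            if disk.get('mountpoint'):
                fh.write('/dev/{} {} {} defaults 0 0\n'.format(
                    disk['disk_id'], disk['mountpoint'], disk['filesystem']))

    # Set the hostname to the VM name
    with open('{}/etc/hostname'.format(temp_dir), 'w') as fh:
        fh.write('{}\n'.format(vm_name))
```

Any optional `arguments` defined in the system definition (for example `keyword_argument` in the `webX` definition above) arrive in the same way, as additional keyword arguments to `install`.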

## Cloud-Init

PVC contains full support for cloud-init, a tool to automatically configure VMs on first boot from a defined set of metadata. The PVC provisioner includes a cloud-init metadata server that the administrator can use to provide information to running VMs.

### Configuring Cloud-Init in VMs

The PVC provisioner sequence makes no special considerations for cloud-init; the administrator must handle the installation of the cloud-init packages, as well as any tweaks to the `cloud.cfg` file, in the installation script. The provisioner does, however, provide the standard EC2-compatible metadata interface at `http://169.254.169.254/latest/`, reachable from within the VM, to serve user data.
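
From inside a running VM, this endpoint can be queried like any EC2-style metadata service. For example, assuming the conventional `user-data` path under `/latest/`:

```
# Query the PVC metadata server from within a VM (illustrative)
import urllib.request

with urllib.request.urlopen('http://169.254.169.254/latest/user-data') as resp:
    print(resp.read().decode())
```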

### Configuring user-data

The PVC provisioner supports managing cloud-init user-data directly; this data will be delivered to VMs based on the configured options.

@@ -25,6 +25,8 @@ pvc:
     enable_storage: True
     # enable_api: Enable or disable the API client, if installed, when node is Primary
     enable_api: True
+    # enable_provisioner: Enable or disable the Provisioner client, if installed, when node is Primary
+    enable_provisioner: True
   # cluster: Cluster-level configuration
   cluster:
     # coordinators: The list of cluster coordinator hostnames

@@ -219,10 +219,14 @@ class DNSNetworkInstance(object):
             INSERT INTO records (domain_id, name, content, type, ttl, prio) VALUES
             (%s, %s, %s, %s, %s, %s)
             """,
-            (domain_id, network_domain, 'nsX.{d} root.{d} 1 10800 1800 86400 86400'.format(d=self.config['cluster_domain']), 'SOA', 86400, 0)
+            (domain_id, network_domain, 'nsX.{d} root.{d} 1 10800 1800 86400 86400'.format(d=self.config['upstream_domain']), 'SOA', 86400, 0)
         )

-        ns_servers = [network_gateway, 'pvc-ns1.{}'.format(self.config['cluster_domain']), 'pvc-ns2.{}'.format(self.config['cluster_domain'])]
+        if self.network.name_servers:
+            ns_servers = self.network.name_servers
+        else:
+            ns_servers = ['pvc-dns.{}'.format(self.config['upstream_domain'])]

         for ns_server in ns_servers:
             sql_curs.execute(
                 """

@@ -138,6 +138,7 @@ def readConfig(pvcd_config_file, myhostname):
         'enable_networking': o_config['pvc']['functions']['enable_networking'],
         'enable_storage': o_config['pvc']['functions']['enable_storage'],
         'enable_api': o_config['pvc']['functions']['enable_api'],
+        'enable_provisioner': o_config['pvc']['functions']['enable_provisioner'],
         'dynamic_directory': o_config['pvc']['system']['configuration']['directories']['dynamic_directory'],
         'log_directory': o_config['pvc']['system']['configuration']['directories']['log_directory'],
         'console_log_directory': o_config['pvc']['system']['configuration']['directories']['console_log_directory'],

@@ -795,7 +796,7 @@ if enable_networking:
     # Add any missing networks to the list
     for network in new_network_list:
         if not network in network_list:
-            d_network[network] = VXNetworkInstance.VXNetworkInstance(network, zk_conn, config, logger, this_node)
+            d_network[network] = VXNetworkInstance.VXNetworkInstance(network, zk_conn, config, logger, this_node, dns_aggregator)
             if config['daemon_mode'] == 'coordinator' and d_network[network].nettype == 'managed':
                 try:
                     dns_aggregator.add_network(d_network[network])

@@ -266,6 +266,9 @@ class NodeInstance(object):
         if self.config['enable_api']:
             self.logger.out('Stopping PVC API client service', state='i')
             common.run_os_command("systemctl stop pvc-api.service")
+        if self.config['enable_provisioner']:
+            self.logger.out('Stopping PVC Provisioner service', state='i')
+            common.run_os_command("systemctl stop pvc-provisioner.service")
         for network in self.d_network:
             self.d_network[network].stopDHCPServer()
             self.d_network[network].removeGateways()

@@ -284,9 +287,6 @@ class NodeInstance(object):
         for network in self.d_network:
             self.d_network[network].createGateways()
             self.d_network[network].startDHCPServer()
-        if self.config['enable_api']:
-            self.logger.out('Starting PVC API client service', state='i')
-            common.run_os_command("systemctl start pvc-api.service")
         time.sleep(1)

         # Switch Patroni leader to the local instance

@@ -322,6 +322,15 @@ class NodeInstance(object):
             time.sleep(1)
         self.dns_aggregator.start_aggregator()

+        # Start the clients
+        if self.config['enable_api']:
+            self.logger.out('Starting PVC API client service', state='i')
+            common.run_os_command("systemctl start pvc-api.service")
+        if self.config['enable_provisioner']:
+            self.logger.out('Starting PVC Provisioner service', state='i')
+            common.run_os_command("systemctl start pvc-provisioner-worker.service")
+            common.run_os_command("systemctl start pvc-provisioner.service")
+
     def createFloatingAddresses(self):
         # VNI floating IP
         self.logger.out(

@@ -31,12 +31,13 @@ import pvcd.common as common

 class VXNetworkInstance(object):
     # Initialization function
-    def __init__ (self, vni, zk_conn, config, logger, this_node):
+    def __init__ (self, vni, zk_conn, config, logger, this_node, dns_aggregator):
         self.vni = vni
         self.zk_conn = zk_conn
         self.config = config
         self.logger = logger
         self.this_node = this_node
+        self.dns_aggregator = dns_aggregator
         self.vni_dev = config['vni_dev']
         self.vni_mtu = config['vni_mtu']

@@ -90,6 +91,7 @@ class VXNetworkInstance(object):
         self.old_description = None
         self.description = None
         self.domain = None
+        self.name_servers = None
         self.ip6_gateway = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_gateway'.format(self.vni))
         self.ip6_network = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_network'.format(self.vni))
         self.ip6_cidrnetmask = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_network'.format(self.vni)).split('/')[-1]

@@ -132,6 +134,8 @@ add rule inet filter input udp dport 53 meta iifname {bridgenic} counter accept
 add rule inet filter input udp dport 67 meta iifname {bridgenic} counter accept
 add rule inet filter input udp dport 123 meta iifname {bridgenic} counter accept
 add rule inet filter input ip6 nexthdr udp udp dport 547 meta iifname {bridgenic} counter accept
+# Allow metadata API into the router from network
+add rule inet filter input tcp dport 80 meta iifname {bridgenic} counter accept
 # Block traffic into the router from network
 add rule inet filter input meta iifname {bridgenic} counter drop
 """.format(

@@ -168,6 +172,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
             if data and self.description != data.decode('ascii'):
                 self.old_description = self.description
                 self.description = data.decode('ascii')
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/domain'.format(self.vni))
         def watch_network_domain(data, stat, event=''):

@@ -178,7 +185,30 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out

             if data and self.domain != data.decode('ascii'):
                 domain = data.decode('ascii')
+                if self.dhcp_server_daemon:
+                    self.dns_aggregator.remove_network(self)
                 self.domain = domain
+                if self.dhcp_server_daemon:
+                    self.dns_aggregator.add_network(self)
+                    self.stopDHCPServer()
+                    self.startDHCPServer()
+
+        @self.zk_conn.DataWatch('/networks/{}/name_servers'.format(self.vni))
+        def watch_network_name_servers(data, stat, event=''):
+            if event and event.type == 'DELETED':
+                # The key has been deleted after existing before; terminate this watcher
+                # because this class instance is about to be reaped in Daemon.py
+                return False
+
+            if data and self.name_servers != data.decode('ascii'):
+                name_servers = data.decode('ascii').split(',')
+                if self.dhcp_server_daemon:
+                    self.dns_aggregator.remove_network(self)
+                self.name_servers = name_servers
+                if self.dhcp_server_daemon:
+                    self.dns_aggregator.add_network(self)
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/ip6_network'.format(self.vni))
         def watch_network_ip6_network(data, stat, event=''):

@@ -191,6 +221,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
                 ip6_network = data.decode('ascii')
                 self.ip6_network = ip6_network
                 self.ip6_cidrnetmask = ip6_network.split('/')[-1]
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/ip6_gateway'.format(self.vni))
         def watch_network_gateway(data, stat, event=''):

@@ -210,6 +243,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
                 if self.dhcp_server_daemon:
                     self.stopDHCPServer()
                     self.startDHCPServer()
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/dhcp6_flag'.format(self.vni))
         def watch_network_dhcp_status(data, stat, event=''):

@@ -236,6 +272,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
                 ip4_network = data.decode('ascii')
                 self.ip4_network = ip4_network
                 self.ip4_cidrnetmask = ip4_network.split('/')[-1]
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/ip4_gateway'.format(self.vni))
         def watch_network_gateway(data, stat, event=''):

@@ -255,6 +294,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
                 if self.dhcp_server_daemon:
                     self.stopDHCPServer()
                     self.startDHCPServer()
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/dhcp4_flag'.format(self.vni))
         def watch_network_dhcp_status(data, stat, event=''):

@@ -279,6 +321,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out

             if data and self.dhcp4_start != data.decode('ascii'):
                 self.dhcp4_start = data.decode('ascii')
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.DataWatch('/networks/{}/dhcp4_end'.format(self.vni))
         def watch_network_dhcp4_end(data, stat, event=''):

@@ -289,6 +334,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out

             if data and self.dhcp4_end != data.decode('ascii'):
                 self.dhcp4_end = data.decode('ascii')
+                if self.dhcp_server_daemon:
+                    self.stopDHCPServer()
+                    self.startDHCPServer()

         @self.zk_conn.ChildrenWatch('/networks/{}/dhcp_reservations'.format(self.vni))
         def watch_network_dhcp_reservations(new_reservations, event=''):

@@ -302,6 +350,9 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
             self.dhcp_reservations = new_reservations
             if self.this_node.router_state == 'primary':
                 self.updateDHCPReservations(old_reservations, new_reservations)
+            if self.dhcp_server_daemon:
+                self.stopDHCPServer()
+                self.startDHCPServer()

         @self.zk_conn.ChildrenWatch('/networks/{}/firewall_rules/in'.format(self.vni))
         def watch_network_firewall_rules(new_rules, event=''):