Merge branch 'the-great-linting'

Complete linting of the project to standard flake8 styling.
This commit is contained in:
Joshua Boniface 2020-11-07 15:37:39 -05:00
commit c7a289e9bb
47 changed files with 2255 additions and 3100 deletions

16
.hooks/pre-commit Executable file
View File

@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Git pre-commit hook: run the project's linter from the repository root and
# abort the commit (non-zero exit) if linting fails.
#
# Quote the command substitution so repository paths containing spaces do not
# word-split (ShellCheck SC2046), and bail out if pushd itself fails so we
# never run the linter from the wrong directory.
pushd "$(git rev-parse --show-toplevel)" &>/dev/null || exit 1
ex=0
# Linting: test the lint script's exit status directly instead of via $?
if ! ./lint; then
    echo "Aborting commit due to linting errors."
    ex=1
fi
echo
popd &>/dev/null
exit $ex

View File

@ -2,6 +2,7 @@ from __future__ import with_statement
from alembic import context from alembic import context
from sqlalchemy import engine_from_config, pool from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig from logging.config import fileConfig
from flask import current_app
import logging import logging
# this is the Alembic Config object, which provides # this is the Alembic Config object, which provides
@ -17,7 +18,6 @@ logger = logging.getLogger('alembic.env')
# for 'autogenerate' support # for 'autogenerate' support
# from myapp import mymodel # from myapp import mymodel
# target_metadata = mymodel.Base.metadata # target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url', config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI')) current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata target_metadata = current_app.extensions['migrate'].db.metadata
@ -81,6 +81,7 @@ def run_migrations_online():
finally: finally:
connection.close() connection.close()
if context.is_offline_mode(): if context.is_offline_mode():
run_migrations_offline() run_migrations_offline()
else: else:

View File

@ -1,7 +1,7 @@
"""PVC version 0.6 """PVC version 0.6
Revision ID: 2d1daa722a0a Revision ID: 2d1daa722a0a
Revises: Revises:
Create Date: 2020-02-15 23:14:14.733134 Create Date: 2020-02-15 23:14:14.733134
""" """

View File

@ -50,15 +50,15 @@ def install(**kwargs):
# failures of these gracefully, should administrators forget to specify them. # failures of these gracefully, should administrators forget to specify them.
try: try:
deb_release = kwargs['deb_release'] deb_release = kwargs['deb_release']
except: except Exception:
deb_release = "stable" deb_release = "stable"
try: try:
deb_mirror = kwargs['deb_mirror'] deb_mirror = kwargs['deb_mirror']
except: except Exception:
deb_mirror = "http://ftp.debian.org/debian" deb_mirror = "http://ftp.debian.org/debian"
try: try:
deb_packages = kwargs['deb_packages'].split(',') deb_packages = kwargs['deb_packages'].split(',')
except: except Exception:
deb_packages = ["linux-image-amd64", "grub-pc", "cloud-init", "python3-cffi-backend", "wget"] deb_packages = ["linux-image-amd64", "grub-pc", "cloud-init", "python3-cffi-backend", "wget"]
# We need to know our root disk # We need to know our root disk
@ -205,7 +205,7 @@ GRUB_DISABLE_LINUX_UUID=false
os.system( os.system(
"grub-install --force /dev/rbd/{}/{}_{}".format(root_disk['pool'], vm_name, root_disk['disk_id']) "grub-install --force /dev/rbd/{}/{}_{}".format(root_disk['pool'], vm_name, root_disk['disk_id'])
) )
os.system( os.system(
"update-grub" "update-grub"
) )
# Set a really dumb root password [TEMPORARY] # Set a really dumb root password [TEMPORARY]

View File

@ -30,8 +30,6 @@
# This script will run under root privileges as the provisioner does. Be careful # This script will run under root privileges as the provisioner does. Be careful
# with that. # with that.
import os
# Installation function - performs a debootstrap install of a Debian system # Installation function - performs a debootstrap install of a Debian system
# Note that the only arguments are keyword arguments. # Note that the only arguments are keyword arguments.
def install(**kwargs): def install(**kwargs):

View File

@ -20,16 +20,16 @@
# #
############################################################################### ###############################################################################
import os
from flask_migrate import Migrate, MigrateCommand from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager from flask_script import Manager
from pvcapid.flaskapi import app, db, config from pvcapid.flaskapi import app, db
from pvcapid.models import * # noqa F401,F403
migrate = Migrate(app, db) migrate = Migrate(app, db)
manager = Manager(app) manager = Manager(app)
manager.add_command('db', MigrateCommand) manager.add_command('db', MigrateCommand)
if __name__ == '__main__': if __name__ == '__main__':
manager.run() manager.run()

View File

@ -20,4 +20,4 @@
# #
############################################################################### ###############################################################################
import pvcapid.Daemon import pvcapid.Daemon # noqa: F401

View File

@ -29,7 +29,7 @@ import pvcapid.flaskapi as pvc_api
if pvc_api.config['ssl_enabled']: if pvc_api.config['ssl_enabled']:
context = (pvc_api.config['ssl_cert_file'], pvc_api.config['ssl_key_file']) context = (pvc_api.config['ssl_cert_file'], pvc_api.config['ssl_key_file'])
else: else:
context=None context = None
print('Starting PVC API daemon at {}:{} with SSL={}, Authentication={}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'], pvc_api.config['ssl_enabled'], pvc_api.config['auth_enabled'])) print('Starting PVC API daemon at {}:{} with SSL={}, Authentication={}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'], pvc_api.config['ssl_enabled'], pvc_api.config['auth_enabled']))
pvc_api.app.run(pvc_api.config['listen_address'], pvc_api.config['listen_port'], threaded=True, ssl_context=context) pvc_api.app.run(pvc_api.config['listen_address'], pvc_api.config['listen_port'], threaded=True, ssl_context=context)

View File

@ -20,25 +20,16 @@
# #
############################################################################### ###############################################################################
import flask
import json
import psycopg2 import psycopg2
import psycopg2.extras import psycopg2.extras
import os
import re
import time
import shlex
import subprocess
from distutils.util import strtobool as dustrtobool from distutils.util import strtobool as dustrtobool
import daemon_lib.common as pvc_common import daemon_lib.common as pvc_common
import daemon_lib.node as pvc_node
import daemon_lib.ceph as pvc_ceph import daemon_lib.ceph as pvc_ceph
import pvcapid.libvirt_schema as libvirt_schema config = None # Set in this namespace by flaskapi
from pvcapid.ova import list_ova
def strtobool(stringv): def strtobool(stringv):
if stringv is None: if stringv is None:
@ -47,9 +38,10 @@ def strtobool(stringv):
return bool(stringv) return bool(stringv)
try: try:
return bool(dustrtobool(stringv)) return bool(dustrtobool(stringv))
except: except Exception:
return False return False
# #
# Exceptions (used by Celery tasks) # Exceptions (used by Celery tasks)
# #
@ -76,6 +68,7 @@ class BenchmarkError(Exception):
# Common functions # Common functions
# #
# Database connections # Database connections
def open_database(config): def open_database(config):
conn = psycopg2.connect( conn = psycopg2.connect(
@ -88,12 +81,14 @@ def open_database(config):
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
return conn, cur return conn, cur
def close_database(conn, cur, failed=False): def close_database(conn, cur, failed=False):
if not failed: if not failed:
conn.commit() conn.commit()
cur.close() cur.close()
conn.close() conn.close()
def list_benchmarks(job=None): def list_benchmarks(job=None):
if job is not None: if job is not None:
query = "SELECT * FROM {} WHERE job = %s;".format('storage_benchmarks') query = "SELECT * FROM {} WHERE job = %s;".format('storage_benchmarks')
@ -117,7 +112,8 @@ def list_benchmarks(job=None):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No benchmark found.' }, 404 return {'message': 'No benchmark found.'}, 404
def run_benchmark(self, pool): def run_benchmark(self, pool):
# Runtime imports # Runtime imports
@ -134,17 +130,16 @@ def run_benchmark(self, pool):
# Phase 0 - connect to databases # Phase 0 - connect to databases
try: try:
db_conn, db_cur = open_database(config) db_conn, db_cur = open_database(config)
except: except Exception:
print('FATAL - failed to connect to Postgres') print('FATAL - failed to connect to Postgres')
raise Exception raise Exception
try: try:
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
except: except Exception:
print('FATAL - failed to connect to Zookeeper') print('FATAL - failed to connect to Zookeeper')
raise Exception raise Exception
print("Storing running status for job '{}' in database".format(cur_time)) print("Storing running status for job '{}' in database".format(cur_time))
try: try:
query = "INSERT INTO storage_benchmarks (job, result) VALUES (%s, %s);" query = "INSERT INTO storage_benchmarks (job, result) VALUES (%s, %s);"
@ -242,12 +237,11 @@ def run_benchmark(self, pool):
--bs={bs} \ --bs={bs} \
--readwrite={rw} --readwrite={rw}
""".format( """.format(
pool=pool, pool=pool,
volume=volume, volume=volume,
test=test, test=test,
bs=test_matrix[test]['bs'], bs=test_matrix[test]['bs'],
rw=test_matrix[test]['rw'] rw=test_matrix[test]['rw'])
)
retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd) retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
if retcode: if retcode:
@ -324,19 +318,19 @@ def run_benchmark(self, pool):
# 7: IOPS # 7: IOPS
# 8: runtime (msec) # 8: runtime (msec)
# Total latency # Total latency
# 37: min # 37: min
# 38: max # 38: max
# 39: mean # 39: mean
# 40: stdev # 40: stdev
# Bandwidth # Bandwidth
# 41: min # 41: min
# 42: max # 42: max
# 44: mean # 44: mean
# 45: stdev # 45: stdev
# 46: # samples # 46: # samples
# IOPS # IOPS
# 47: min # 47: min
# 48: max # 48: max
# 49: mean # 49: mean
# 50: stdev # 50: stdev
# 51: # samples # 51: # samples
@ -405,7 +399,7 @@ def run_benchmark(self, pool):
# 96: mean # 96: mean
# 97: stdev # 97: stdev
# 98: # samples # 98: # samples
# CPU # CPU
# 146: user # 146: user
# 147: system # 147: system
# 148: ctx switches # 148: ctx switches
@ -446,7 +440,7 @@ def run_benchmark(self, pool):
"minfault": results[150] "minfault": results[150]
} }
} }
# Phase 3 - cleanup # Phase 3 - cleanup
self.update_state(state='RUNNING', meta={'current': 3, 'total': 3, 'status': 'Cleaning up and storing results'}) self.update_state(state='RUNNING', meta={'current': 3, 'total': 3, 'status': 'Cleaning up and storing results'})
time.sleep(1) time.sleep(1)
@ -469,4 +463,4 @@ def run_benchmark(self, pool):
close_database(db_conn, db_cur) close_database(db_conn, db_cur)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
return { 'status': "Storage benchmark '{}' completed successfully.", 'current': 3, 'total': 3 } return {'status': "Storage benchmark '{}' completed successfully.", 'current': 3, 'total': 3}

File diff suppressed because it is too large Load Diff

View File

@ -21,7 +21,6 @@
############################################################################### ###############################################################################
import flask import flask
import json
import lxml.etree as etree import lxml.etree as etree
from distutils.util import strtobool as dustrtobool from distutils.util import strtobool as dustrtobool
@ -35,6 +34,9 @@ import daemon_lib.vm as pvc_vm
import daemon_lib.network as pvc_network import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph import daemon_lib.ceph as pvc_ceph
config = None # Set in this namespace by flaskapi
def strtobool(stringv): def strtobool(stringv):
if stringv is None: if stringv is None:
return False return False
@ -42,9 +44,10 @@ def strtobool(stringv):
return bool(stringv) return bool(stringv)
try: try:
return bool(dustrtobool(stringv)) return bool(dustrtobool(stringv))
except: except Exception:
return False return False
# #
# Initialization function # Initialization function
# #
@ -82,6 +85,7 @@ def initialize_cluster():
return True return True
# #
# Cluster functions # Cluster functions
# #
@ -95,6 +99,7 @@ def cluster_status():
return retdata, 200 return retdata, 200
def cluster_maintenance(maint_state='false'): def cluster_maintenance(maint_state='false'):
""" """
Set the cluster in or out of maintenance state Set the cluster in or out of maintenance state
@ -113,6 +118,7 @@ def cluster_maintenance(maint_state='false'):
return retdata, retcode return retdata, retcode
# #
# Node functions # Node functions
# #
@ -144,6 +150,7 @@ def node_list(limit=None, daemon_state=None, coordinator_state=None, domain_stat
return retdata, retcode return retdata, retcode
def node_daemon_state(node): def node_daemon_state(node):
""" """
Return the daemon state of node NODE. Return the daemon state of node NODE.
@ -172,6 +179,7 @@ def node_daemon_state(node):
return retdata, retcode return retdata, retcode
def node_coordinator_state(node): def node_coordinator_state(node):
""" """
Return the coordinator state of node NODE. Return the coordinator state of node NODE.
@ -200,6 +208,7 @@ def node_coordinator_state(node):
return retdata, retcode return retdata, retcode
def node_domain_state(node): def node_domain_state(node):
""" """
Return the domain state of node NODE. Return the domain state of node NODE.
@ -225,11 +234,12 @@ def node_domain_state(node):
return retdata, retcode return retdata, retcode
def node_secondary(node): def node_secondary(node):
""" """
Take NODE out of primary router mode. Take NODE out of primary router mode.
""" """
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_node.secondary_node(zk_conn, node) retflag, retdata = pvc_node.secondary_node(zk_conn, node)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
@ -243,11 +253,12 @@ def node_secondary(node):
} }
return output, retcode return output, retcode
def node_primary(node): def node_primary(node):
""" """
Set NODE to primary router mode. Set NODE to primary router mode.
""" """
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_node.primary_node(zk_conn, node) retflag, retdata = pvc_node.primary_node(zk_conn, node)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
@ -261,6 +272,7 @@ def node_primary(node):
} }
return output, retcode return output, retcode
def node_flush(node, wait): def node_flush(node, wait):
""" """
Flush NODE of running VMs. Flush NODE of running VMs.
@ -279,6 +291,7 @@ def node_flush(node, wait):
} }
return output, retcode return output, retcode
def node_ready(node, wait): def node_ready(node, wait):
""" """
Restore NODE to active service. Restore NODE to active service.
@ -297,6 +310,7 @@ def node_ready(node, wait):
} }
return output, retcode return output, retcode
# #
# VM functions # VM functions
# #
@ -310,6 +324,7 @@ def vm_is_migrated(vm):
return retdata return retdata
def vm_state(vm): def vm_state(vm):
""" """
Return the state of virtual machine VM. Return the state of virtual machine VM.
@ -342,6 +357,7 @@ def vm_state(vm):
return retdata, retcode return retdata, retcode
def vm_node(vm): def vm_node(vm):
""" """
Return the current node of virtual machine VM. Return the current node of virtual machine VM.
@ -375,6 +391,7 @@ def vm_node(vm):
return retdata, retcode return retdata, retcode
def vm_console(vm, lines=None): def vm_console(vm, lines=None):
""" """
Return the current console log for VM. Return the current console log for VM.
@ -403,6 +420,7 @@ def vm_console(vm, lines=None):
return retdata, retcode return retdata, retcode
def vm_list(node=None, state=None, limit=None, is_fuzzy=True): def vm_list(node=None, state=None, limit=None, is_fuzzy=True):
""" """
Return a list of VMs with limit LIMIT. Return a list of VMs with limit LIMIT.
@ -431,6 +449,7 @@ def vm_list(node=None, state=None, limit=None, is_fuzzy=True):
return retdata, retcode return retdata, retcode
def vm_define(xml, node, limit, selector, autostart, migration_method): def vm_define(xml, node, limit, selector, autostart, migration_method):
""" """
Define a VM from Libvirt XML in the PVC cluster. Define a VM from Libvirt XML in the PVC cluster.
@ -440,7 +459,7 @@ def vm_define(xml, node, limit, selector, autostart, migration_method):
xml_data = etree.fromstring(xml) xml_data = etree.fromstring(xml)
new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8') new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8')
except Exception as e: except Exception as e:
return { 'message': 'XML is malformed or incorrect: {}'.format(e) }, 400 return {'message': 'XML is malformed or incorrect: {}'.format(e)}, 400
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_vm.define_vm(zk_conn, new_cfg, node, limit, selector, autostart, migration_method, profile=None) retflag, retdata = pvc_vm.define_vm(zk_conn, new_cfg, node, limit, selector, autostart, migration_method, profile=None)
@ -456,6 +475,7 @@ def vm_define(xml, node, limit, selector, autostart, migration_method):
} }
return output, retcode return output, retcode
def get_vm_meta(vm): def get_vm_meta(vm):
""" """
Get metadata of a VM. Get metadata of a VM.
@ -491,6 +511,7 @@ def get_vm_meta(vm):
return retdata, retcode return retdata, retcode
def update_vm_meta(vm, limit, selector, autostart, provisioner_profile, migration_method): def update_vm_meta(vm, limit, selector, autostart, provisioner_profile, migration_method):
""" """
Update metadata of a VM. Update metadata of a VM.
@ -499,7 +520,7 @@ def update_vm_meta(vm, limit, selector, autostart, provisioner_profile, migratio
if autostart is not None: if autostart is not None:
try: try:
autostart = bool(strtobool(autostart)) autostart = bool(strtobool(autostart))
except: except Exception:
autostart = False autostart = False
retflag, retdata = pvc_vm.modify_vm_metadata(zk_conn, vm, limit, selector, autostart, provisioner_profile, migration_method) retflag, retdata = pvc_vm.modify_vm_metadata(zk_conn, vm, limit, selector, autostart, provisioner_profile, migration_method)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
@ -514,6 +535,7 @@ def update_vm_meta(vm, limit, selector, autostart, provisioner_profile, migratio
} }
return output, retcode return output, retcode
def vm_modify(name, restart, xml): def vm_modify(name, restart, xml):
""" """
Modify a VM Libvirt XML in the PVC cluster. Modify a VM Libvirt XML in the PVC cluster.
@ -523,7 +545,7 @@ def vm_modify(name, restart, xml):
xml_data = etree.fromstring(xml) xml_data = etree.fromstring(xml)
new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8') new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8')
except Exception as e: except Exception as e:
return { 'message': 'XML is malformed or incorrect: {}'.format(e) }, 400 return {'message': 'XML is malformed or incorrect: {}'.format(e)}, 400
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_vm.modify_vm(zk_conn, name, restart, new_cfg) retflag, retdata = pvc_vm.modify_vm(zk_conn, name, restart, new_cfg)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
@ -538,6 +560,7 @@ def vm_modify(name, restart, xml):
} }
return output, retcode return output, retcode
def vm_undefine(name): def vm_undefine(name):
""" """
Undefine a VM from the PVC cluster. Undefine a VM from the PVC cluster.
@ -556,6 +579,7 @@ def vm_undefine(name):
} }
return output, retcode return output, retcode
def vm_remove(name): def vm_remove(name):
""" """
Remove a VM from the PVC cluster. Remove a VM from the PVC cluster.
@ -574,6 +598,7 @@ def vm_remove(name):
} }
return output, retcode return output, retcode
def vm_start(name): def vm_start(name):
""" """
Start a VM in the PVC cluster. Start a VM in the PVC cluster.
@ -592,6 +617,7 @@ def vm_start(name):
} }
return output, retcode return output, retcode
def vm_restart(name, wait): def vm_restart(name, wait):
""" """
Restart a VM in the PVC cluster. Restart a VM in the PVC cluster.
@ -610,6 +636,7 @@ def vm_restart(name, wait):
} }
return output, retcode return output, retcode
def vm_shutdown(name, wait): def vm_shutdown(name, wait):
""" """
Shutdown a VM in the PVC cluster. Shutdown a VM in the PVC cluster.
@ -628,6 +655,7 @@ def vm_shutdown(name, wait):
} }
return output, retcode return output, retcode
def vm_stop(name): def vm_stop(name):
""" """
Forcibly stop a VM in the PVC cluster. Forcibly stop a VM in the PVC cluster.
@ -646,6 +674,7 @@ def vm_stop(name):
} }
return output, retcode return output, retcode
def vm_disable(name): def vm_disable(name):
""" """
Disable a (stopped) VM in the PVC cluster. Disable a (stopped) VM in the PVC cluster.
@ -664,6 +693,7 @@ def vm_disable(name):
} }
return output, retcode return output, retcode
def vm_move(name, node, wait, force_live): def vm_move(name, node, wait, force_live):
""" """
Move a VM to another node. Move a VM to another node.
@ -682,6 +712,7 @@ def vm_move(name, node, wait, force_live):
} }
return output, retcode return output, retcode
def vm_migrate(name, node, flag_force, wait, force_live): def vm_migrate(name, node, flag_force, wait, force_live):
""" """
Temporarily migrate a VM to another node. Temporarily migrate a VM to another node.
@ -700,6 +731,7 @@ def vm_migrate(name, node, flag_force, wait, force_live):
} }
return output, retcode return output, retcode
def vm_unmigrate(name, wait, force_live): def vm_unmigrate(name, wait, force_live):
""" """
Unmigrate a migrated VM. Unmigrate a migrated VM.
@ -718,6 +750,7 @@ def vm_unmigrate(name, wait, force_live):
} }
return output, retcode return output, retcode
def vm_flush_locks(vm): def vm_flush_locks(vm):
""" """
Flush locks of a (stopped) VM. Flush locks of a (stopped) VM.
@ -731,7 +764,7 @@ def vm_flush_locks(vm):
retdata = retdata[0] retdata = retdata[0]
if retdata['state'] not in ['stop', 'disable']: if retdata['state'] not in ['stop', 'disable']:
return {"message":"VM must be stopped to flush locks"}, 400 return {"message": "VM must be stopped to flush locks"}, 400
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_vm.flush_locks(zk_conn, vm) retflag, retdata = pvc_vm.flush_locks(zk_conn, vm)
@ -747,6 +780,7 @@ def vm_flush_locks(vm):
} }
return output, retcode return output, retcode
# #
# Network functions # Network functions
# #
@ -778,6 +812,7 @@ def net_list(limit=None, is_fuzzy=True):
return retdata, retcode return retdata, retcode
def net_add(vni, description, nettype, domain, name_servers, def net_add(vni, description, nettype, domain, name_servers,
ip4_network, ip4_gateway, ip6_network, ip6_gateway, ip4_network, ip4_gateway, ip6_network, ip6_gateway,
dhcp4_flag, dhcp4_start, dhcp4_end): dhcp4_flag, dhcp4_start, dhcp4_end):
@ -788,8 +823,8 @@ def net_add(vni, description, nettype, domain, name_servers,
dhcp4_flag = bool(strtobool(dhcp4_flag)) dhcp4_flag = bool(strtobool(dhcp4_flag))
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_network.add_network(zk_conn, vni, description, nettype, domain, name_servers, retflag, retdata = pvc_network.add_network(zk_conn, vni, description, nettype, domain, name_servers,
ip4_network, ip4_gateway, ip6_network, ip6_gateway, ip4_network, ip4_gateway, ip6_network, ip6_gateway,
dhcp4_flag, dhcp4_start, dhcp4_end) dhcp4_flag, dhcp4_start, dhcp4_end)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
if retflag: if retflag:
@ -802,6 +837,7 @@ def net_add(vni, description, nettype, domain, name_servers,
} }
return output, retcode return output, retcode
def net_modify(vni, description, domain, name_servers, def net_modify(vni, description, domain, name_servers,
ip4_network, ip4_gateway, ip4_network, ip4_gateway,
ip6_network, ip6_gateway, ip6_network, ip6_gateway,
@ -813,8 +849,8 @@ def net_modify(vni, description, domain, name_servers,
dhcp4_flag = bool(strtobool(dhcp4_flag)) dhcp4_flag = bool(strtobool(dhcp4_flag))
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_network.modify_network(zk_conn, vni, description, domain, name_servers, retflag, retdata = pvc_network.modify_network(zk_conn, vni, description, domain, name_servers,
ip4_network, ip4_gateway, ip6_network, ip6_gateway, ip4_network, ip4_gateway, ip6_network, ip6_gateway,
dhcp4_flag, dhcp4_start, dhcp4_end) dhcp4_flag, dhcp4_start, dhcp4_end)
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
if retflag: if retflag:
@ -827,6 +863,7 @@ def net_modify(vni, description, domain, name_servers,
} }
return output, retcode return output, retcode
def net_remove(network): def net_remove(network):
""" """
Remove a virtual client network from the PVC cluster. Remove a virtual client network from the PVC cluster.
@ -845,6 +882,7 @@ def net_remove(network):
} }
return output, retcode return output, retcode
def net_dhcp_list(network, limit=None, static=False): def net_dhcp_list(network, limit=None, static=False):
""" """
Return a list of DHCP leases in network NETWORK with limit LIMIT. Return a list of DHCP leases in network NETWORK with limit LIMIT.
@ -869,6 +907,7 @@ def net_dhcp_list(network, limit=None, static=False):
return retdata, retcode return retdata, retcode
def net_dhcp_add(network, ipaddress, macaddress, hostname): def net_dhcp_add(network, ipaddress, macaddress, hostname):
""" """
Add a static DHCP lease to a virtual client network. Add a static DHCP lease to a virtual client network.
@ -887,6 +926,7 @@ def net_dhcp_add(network, ipaddress, macaddress, hostname):
} }
return output, retcode return output, retcode
def net_dhcp_remove(network, macaddress): def net_dhcp_remove(network, macaddress):
""" """
Remove a static DHCP lease from a virtual client network. Remove a static DHCP lease from a virtual client network.
@ -905,6 +945,7 @@ def net_dhcp_remove(network, macaddress):
} }
return output, retcode return output, retcode
def net_acl_list(network, limit=None, direction=None, is_fuzzy=True): def net_acl_list(network, limit=None, direction=None, is_fuzzy=True):
""" """
Return a list of network ACLs in network NETWORK with limit LIMIT. Return a list of network ACLs in network NETWORK with limit LIMIT.
@ -933,6 +974,7 @@ def net_acl_list(network, limit=None, direction=None, is_fuzzy=True):
return retdata, retcode return retdata, retcode
def net_acl_add(network, direction, description, rule, order): def net_acl_add(network, direction, description, rule, order):
""" """
Add an ACL to a virtual client network. Add an ACL to a virtual client network.
@ -951,6 +993,7 @@ def net_acl_add(network, direction, description, rule, order):
} }
return output, retcode return output, retcode
def net_acl_remove(network, description): def net_acl_remove(network, description):
""" """
Remove an ACL from a virtual client network. Remove an ACL from a virtual client network.
@ -969,6 +1012,7 @@ def net_acl_remove(network, description):
} }
return output, retcode return output, retcode
# #
# Ceph functions # Ceph functions
# #
@ -987,6 +1031,7 @@ def ceph_status():
return retdata, retcode return retdata, retcode
def ceph_util(): def ceph_util():
""" """
Get the current Ceph cluster utilization. Get the current Ceph cluster utilization.
@ -1002,6 +1047,7 @@ def ceph_util():
return retdata, retcode return retdata, retcode
def ceph_osd_list(limit=None): def ceph_osd_list(limit=None):
""" """
Get the list of OSDs in the Ceph storage cluster. Get the list of OSDs in the Ceph storage cluster.
@ -1026,6 +1072,7 @@ def ceph_osd_list(limit=None):
return retdata, retcode return retdata, retcode
def ceph_osd_state(osd): def ceph_osd_state(osd):
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_ceph.get_list_osd(zk_conn, osd) retflag, retdata = pvc_ceph.get_list_osd(zk_conn, osd)
@ -1048,7 +1095,8 @@ def ceph_osd_state(osd):
in_state = retdata[0]['stats']['in'] in_state = retdata[0]['stats']['in']
up_state = retdata[0]['stats']['up'] up_state = retdata[0]['stats']['up']
return { "id": osd, "in": in_state, "up": up_state }, retcode return {"id": osd, "in": in_state, "up": up_state}, retcode
def ceph_osd_add(node, device, weight): def ceph_osd_add(node, device, weight):
""" """
@ -1068,6 +1116,7 @@ def ceph_osd_add(node, device, weight):
} }
return output, retcode return output, retcode
def ceph_osd_remove(osd_id): def ceph_osd_remove(osd_id):
""" """
Remove a Ceph OSD from the PVC Ceph storage cluster. Remove a Ceph OSD from the PVC Ceph storage cluster.
@ -1086,6 +1135,7 @@ def ceph_osd_remove(osd_id):
} }
return output, retcode return output, retcode
def ceph_osd_in(osd_id): def ceph_osd_in(osd_id):
""" """
Set in a Ceph OSD in the PVC Ceph storage cluster. Set in a Ceph OSD in the PVC Ceph storage cluster.
@ -1104,6 +1154,7 @@ def ceph_osd_in(osd_id):
} }
return output, retcode return output, retcode
def ceph_osd_out(osd_id): def ceph_osd_out(osd_id):
""" """
Set out a Ceph OSD in the PVC Ceph storage cluster. Set out a Ceph OSD in the PVC Ceph storage cluster.
@ -1122,6 +1173,7 @@ def ceph_osd_out(osd_id):
} }
return output, retcode return output, retcode
def ceph_osd_set(option): def ceph_osd_set(option):
""" """
Set options on a Ceph OSD in the PVC Ceph storage cluster. Set options on a Ceph OSD in the PVC Ceph storage cluster.
@ -1140,6 +1192,7 @@ def ceph_osd_set(option):
} }
return output, retcode return output, retcode
def ceph_osd_unset(option): def ceph_osd_unset(option):
""" """
Unset options on a Ceph OSD in the PVC Ceph storage cluster. Unset options on a Ceph OSD in the PVC Ceph storage cluster.
@ -1158,6 +1211,7 @@ def ceph_osd_unset(option):
} }
return output, retcode return output, retcode
def ceph_pool_list(limit=None, is_fuzzy=True): def ceph_pool_list(limit=None, is_fuzzy=True):
""" """
Get the list of RBD pools in the Ceph storage cluster. Get the list of RBD pools in the Ceph storage cluster.
@ -1186,6 +1240,7 @@ def ceph_pool_list(limit=None, is_fuzzy=True):
return retdata, retcode return retdata, retcode
def ceph_pool_add(name, pgs, replcfg): def ceph_pool_add(name, pgs, replcfg):
""" """
Add a Ceph RBD pool to the PVC Ceph storage cluster. Add a Ceph RBD pool to the PVC Ceph storage cluster.
@ -1204,6 +1259,7 @@ def ceph_pool_add(name, pgs, replcfg):
} }
return output, retcode return output, retcode
def ceph_pool_remove(name): def ceph_pool_remove(name):
""" """
Remove a Ceph RBD pool to the PVC Ceph storage cluster. Remove a Ceph RBD pool to the PVC Ceph storage cluster.
@ -1222,6 +1278,7 @@ def ceph_pool_remove(name):
} }
return output, retcode return output, retcode
def ceph_volume_list(pool=None, limit=None, is_fuzzy=True): def ceph_volume_list(pool=None, limit=None, is_fuzzy=True):
""" """
Get the list of RBD volumes in the Ceph storage cluster. Get the list of RBD volumes in the Ceph storage cluster.
@ -1250,6 +1307,7 @@ def ceph_volume_list(pool=None, limit=None, is_fuzzy=True):
return retdata, retcode return retdata, retcode
def ceph_volume_add(pool, name, size): def ceph_volume_add(pool, name, size):
""" """
Add a Ceph RBD volume to the PVC Ceph storage cluster. Add a Ceph RBD volume to the PVC Ceph storage cluster.
@ -1268,6 +1326,7 @@ def ceph_volume_add(pool, name, size):
} }
return output, retcode return output, retcode
def ceph_volume_clone(pool, name, source_volume): def ceph_volume_clone(pool, name, source_volume):
""" """
Clone a Ceph RBD volume to a new volume on the PVC Ceph storage cluster. Clone a Ceph RBD volume to a new volume on the PVC Ceph storage cluster.
@ -1286,6 +1345,7 @@ def ceph_volume_clone(pool, name, source_volume):
} }
return output, retcode return output, retcode
def ceph_volume_resize(pool, name, size): def ceph_volume_resize(pool, name, size):
""" """
Resize an existing Ceph RBD volume in the PVC Ceph storage cluster. Resize an existing Ceph RBD volume in the PVC Ceph storage cluster.
@ -1304,6 +1364,7 @@ def ceph_volume_resize(pool, name, size):
} }
return output, retcode return output, retcode
def ceph_volume_rename(pool, name, new_name): def ceph_volume_rename(pool, name, new_name):
""" """
Rename a Ceph RBD volume in the PVC Ceph storage cluster. Rename a Ceph RBD volume in the PVC Ceph storage cluster.
@ -1322,6 +1383,7 @@ def ceph_volume_rename(pool, name, new_name):
} }
return output, retcode return output, retcode
def ceph_volume_remove(pool, name): def ceph_volume_remove(pool, name):
""" """
Remove a Ceph RBD volume to the PVC Ceph storage cluster. Remove a Ceph RBD volume to the PVC Ceph storage cluster.
@ -1340,6 +1402,7 @@ def ceph_volume_remove(pool, name):
} }
return output, retcode return output, retcode
def ceph_volume_upload(pool, volume, img_type): def ceph_volume_upload(pool, volume, img_type):
""" """
Upload a raw file via HTTP post to a PVC Ceph volume Upload a raw file via HTTP post to a PVC Ceph volume
@ -1392,8 +1455,14 @@ def ceph_volume_upload(pool, volume, img_type):
# Save the data to the blockdev directly # Save the data to the blockdev directly
try: try:
data.save(dest_blockdev) # This sets up a custom stream_factory that writes directly into the ova_blockdev,
except: # rather than the standard stream_factory which writes to a temporary file waiting
# on a save() call. This will break if the API ever uploaded multiple files, but
# this is an acceptable workaround.
def image_stream_factory(total_content_length, filename, content_type, content_length=None):
return open(dest_blockdev, 'wb')
parse_form_data(flask.request.environ, stream_factory=image_stream_factory)
except Exception:
output = { output = {
'message': "Failed to write image file to volume." 'message': "Failed to write image file to volume."
} }
@ -1457,7 +1526,7 @@ def ceph_volume_upload(pool, volume, img_type):
def ova_stream_factory(total_content_length, filename, content_type, content_length=None): def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
return open(temp_blockdev, 'wb') return open(temp_blockdev, 'wb')
parse_form_data(flask.request.environ, stream_factory=ova_stream_factory) parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
except: except Exception:
output = { output = {
'message': "Failed to upload or write image file to temporary volume." 'message': "Failed to upload or write image file to temporary volume."
} }
@ -1484,6 +1553,7 @@ def ceph_volume_upload(pool, volume, img_type):
cleanup_maps_and_volumes() cleanup_maps_and_volumes()
return output, retcode return output, retcode
def ceph_volume_snapshot_list(pool=None, volume=None, limit=None, is_fuzzy=True): def ceph_volume_snapshot_list(pool=None, volume=None, limit=None, is_fuzzy=True):
""" """
Get the list of RBD volume snapshots in the Ceph storage cluster. Get the list of RBD volume snapshots in the Ceph storage cluster.
@ -1512,6 +1582,7 @@ def ceph_volume_snapshot_list(pool=None, volume=None, limit=None, is_fuzzy=True)
return retdata, retcode return retdata, retcode
def ceph_volume_snapshot_add(pool, volume, name): def ceph_volume_snapshot_add(pool, volume, name):
""" """
Add a Ceph RBD volume snapshot to the PVC Ceph storage cluster. Add a Ceph RBD volume snapshot to the PVC Ceph storage cluster.
@ -1530,6 +1601,7 @@ def ceph_volume_snapshot_add(pool, volume, name):
} }
return output, retcode return output, retcode
def ceph_volume_snapshot_rename(pool, volume, name, new_name): def ceph_volume_snapshot_rename(pool, volume, name, new_name):
""" """
Rename a Ceph RBD volume snapshot in the PVC Ceph storage cluster. Rename a Ceph RBD volume snapshot in the PVC Ceph storage cluster.
@ -1548,6 +1620,7 @@ def ceph_volume_snapshot_rename(pool, volume, name, new_name):
} }
return output, retcode return output, retcode
def ceph_volume_snapshot_remove(pool, volume, name): def ceph_volume_snapshot_remove(pool, volume, name):
""" """
Remove a Ceph RBD volume snapshot from the PVC Ceph storage cluster. Remove a Ceph RBD volume snapshot from the PVC Ceph storage cluster.
@ -1565,4 +1638,3 @@ def ceph_volume_snapshot_remove(pool, volume, name):
'message': retdata.replace('\"', '\'') 'message': retdata.replace('\"', '\'')
} }
return output, retcode return output, retcode

View File

@ -20,7 +20,8 @@
# #
############################################################################### ###############################################################################
from pvcapid.flaskapi import app, db from pvcapid.flaskapi import db
class DBSystemTemplate(db.Model): class DBSystemTemplate(db.Model):
__tablename__ = 'system_template' __tablename__ = 'system_template'
@ -54,6 +55,7 @@ class DBSystemTemplate(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBNetworkTemplate(db.Model): class DBNetworkTemplate(db.Model):
__tablename__ = 'network_template' __tablename__ = 'network_template'
@ -70,6 +72,7 @@ class DBNetworkTemplate(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBNetworkElement(db.Model): class DBNetworkElement(db.Model):
__tablename__ = 'network' __tablename__ = 'network'
@ -84,6 +87,7 @@ class DBNetworkElement(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBStorageTemplate(db.Model): class DBStorageTemplate(db.Model):
__tablename__ = 'storage_template' __tablename__ = 'storage_template'
@ -98,6 +102,7 @@ class DBStorageTemplate(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBStorageElement(db.Model): class DBStorageElement(db.Model):
__tablename__ = 'storage' __tablename__ = 'storage'
@ -124,6 +129,7 @@ class DBStorageElement(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBUserdata(db.Model): class DBUserdata(db.Model):
__tablename__ = 'userdata' __tablename__ = 'userdata'
@ -138,6 +144,7 @@ class DBUserdata(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBScript(db.Model): class DBScript(db.Model):
__tablename__ = 'script' __tablename__ = 'script'
@ -152,6 +159,7 @@ class DBScript(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBOva(db.Model): class DBOva(db.Model):
__tablename__ = 'ova' __tablename__ = 'ova'
@ -166,6 +174,7 @@ class DBOva(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBOvaVolume(db.Model): class DBOvaVolume(db.Model):
__tablename__ = 'ova_volume' __tablename__ = 'ova_volume'
@ -188,6 +197,7 @@ class DBOvaVolume(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBProfile(db.Model): class DBProfile(db.Model):
__tablename__ = 'profile' __tablename__ = 'profile'
@ -216,6 +226,7 @@ class DBProfile(db.Model):
def __repr__(self): def __repr__(self):
return '<id {}>'.format(self.id) return '<id {}>'.format(self.id)
class DBStorageBenchmarks(db.Model): class DBStorageBenchmarks(db.Model):
__tablename__ = 'storage_benchmarks' __tablename__ = 'storage_benchmarks'

View File

@ -21,31 +21,24 @@
############################################################################### ###############################################################################
import flask import flask
import json
import psycopg2 import psycopg2
import psycopg2.extras import psycopg2.extras
import os
import re import re
import time
import math import math
import tarfile import tarfile
import shutil
import shlex
import subprocess
import lxml.etree import lxml.etree
from werkzeug.formparser import parse_form_data from werkzeug.formparser import parse_form_data
import daemon_lib.common as pvc_common import daemon_lib.common as pvc_common
import daemon_lib.node as pvc_node
import daemon_lib.vm as pvc_vm
import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph import daemon_lib.ceph as pvc_ceph
import pvcapid.libvirt_schema as libvirt_schema
import pvcapid.provisioner as provisioner import pvcapid.provisioner as provisioner
config = None # Set in this namespace by flaskapi
# #
# Common functions # Common functions
# #
@ -62,12 +55,14 @@ def open_database(config):
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
return conn, cur return conn, cur
def close_database(conn, cur, failed=False): def close_database(conn, cur, failed=False):
if not failed: if not failed:
conn.commit() conn.commit()
cur.close() cur.close()
conn.close() conn.close()
# #
# OVA functions # OVA functions
# #
@ -75,11 +70,11 @@ def list_ova(limit, is_fuzzy=True):
if limit: if limit:
if is_fuzzy: if is_fuzzy:
# Handle fuzzy vs. non-fuzzy limits # Handle fuzzy vs. non-fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '%' + limit limit = '%' + limit
else: else:
limit = limit[1:] limit = limit[1:]
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '%' limit = limit + '%'
else: else:
limit = limit[:-1] limit = limit[:-1]
@ -113,12 +108,13 @@ def list_ova(limit, is_fuzzy=True):
if ova_data: if ova_data:
return ova_data, 200 return ova_data, 200
else: else:
return { 'message': 'No OVAs found.' }, 404 return {'message': 'No OVAs found.'}, 404
def delete_ova(name): def delete_ova(name):
ova_data, retcode = list_ova(name, is_fuzzy=False) ova_data, retcode = list_ova(name, is_fuzzy=False)
if retcode != 200: if retcode != 200:
retmsg = { 'message': 'The OVA "{}" does not exist.'.format(name) } retmsg = {'message': 'The OVA "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -156,14 +152,15 @@ def delete_ova(name):
args = (ova_id,) args = (ova_id,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed OVA image "{}".'.format(name) } retmsg = {"message": 'Removed OVA image "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to remove OVA "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to remove OVA "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def upload_ova(pool, name, ova_size): def upload_ova(pool, name, ova_size):
ova_archive = None ova_archive = None
@ -233,7 +230,7 @@ def upload_ova(pool, name, ova_size):
def ova_stream_factory(total_content_length, filename, content_type, content_length=None): def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
return open(ova_blockdev, 'wb') return open(ova_blockdev, 'wb')
parse_form_data(flask.request.environ, stream_factory=ova_stream_factory) parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
except: except Exception:
output = { output = {
'message': "Failed to upload or write OVA file to temporary volume." 'message': "Failed to upload or write OVA file to temporary volume."
} }
@ -255,7 +252,7 @@ def upload_ova(pool, name, ova_size):
return output, retcode return output, retcode
# Parse through the members list and extract the OVF file # Parse through the members list and extract the OVF file
for element in set(x for x in members if re.match('.*\.ovf$', x.name)): for element in set(x for x in members if re.match('.*[.]ovf$', x.name)):
ovf_file = ova_archive.extractfile(element) ovf_file = ova_archive.extractfile(element)
# Parse the OVF file to get our VM details # Parse the OVF file to get our VM details
@ -273,12 +270,10 @@ def upload_ova(pool, name, ova_size):
disk_identifier = "sd{}".format(chr(ord('a') + idx)) disk_identifier = "sd{}".format(chr(ord('a') + idx))
volume = "ova_{}_{}".format(name, disk_identifier) volume = "ova_{}_{}".format(name, disk_identifier)
dev_src = disk.get('src') dev_src = disk.get('src')
dev_type = dev_src.split('.')[-1]
dev_size_raw = ova_archive.getmember(dev_src).size dev_size_raw = ova_archive.getmember(dev_src).size
vm_volume_size = disk.get('capacity') vm_volume_size = disk.get('capacity')
# Normalize the dev size to bytes # Normalize the dev size to bytes
dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(dev_size_raw)[:-1])
dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw) dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw)
def cleanup_img_maps(): def cleanup_img_maps():
@ -321,15 +316,13 @@ def upload_ova(pool, name, ova_size):
# Open the temporary blockdev and seek to byte 0 # Open the temporary blockdev and seek to byte 0
blk_file = open(temp_blockdev, 'wb') blk_file = open(temp_blockdev, 'wb')
blk_file.seek(0) blk_file.seek(0)
# Write the contents of vmdk_file into blk_file
bytes_written = blk_file.write(vmdk_file.read())
# Close blk_file (and flush the buffers) # Close blk_file (and flush the buffers)
blk_file.close() blk_file.close()
# Close vmdk_file # Close vmdk_file
vmdk_file.close() vmdk_file.close()
# Perform an OS-level sync # Perform an OS-level sync
pvc_common.run_os_command('sync') pvc_common.run_os_command('sync')
except: except Exception:
output = { output = {
'message': "Failed to write image file '{}' to temporary volume.".format(disk.get('src')) 'message': "Failed to write image file '{}' to temporary volume.".format(disk.get('src'))
} }
@ -419,16 +412,17 @@ def upload_ova(pool, name, ova_size):
retcode = 200 retcode = 200
return output, retcode return output, retcode
# #
# OVF parser # OVF parser
# #
class OVFParser(object): class OVFParser(object):
RASD_TYPE = { RASD_TYPE = {
"1": "vmci", "1": "vmci",
"3": "vcpus", "3": "vcpus",
"4": "vram", "4": "vram",
"5": "ide-controller", "5": "ide-controller",
"6": "scsi-controller", "6": "scsi-controller",
"10": "ethernet-adapter", "10": "ethernet-adapter",
"15": "cdrom", "15": "cdrom",
"17": "disk", "17": "disk",
@ -442,7 +436,7 @@ class OVFParser(object):
path = "{{{schema}}}References/{{{schema}}}File".format(schema=self.OVF_SCHEMA) path = "{{{schema}}}References/{{{schema}}}File".format(schema=self.OVF_SCHEMA)
id_attr = "{{{schema}}}id".format(schema=self.OVF_SCHEMA) id_attr = "{{{schema}}}id".format(schema=self.OVF_SCHEMA)
href_attr = "{{{schema}}}href".format(schema=self.OVF_SCHEMA) href_attr = "{{{schema}}}href".format(schema=self.OVF_SCHEMA)
current_list = self.xml.findall(path) current_list = self.xml.findall(path)
results = [(x.get(id_attr), x.get(href_attr)) for x in current_list] results = [(x.get(id_attr), x.get(href_attr)) for x in current_list]
return results return results
@ -452,12 +446,12 @@ class OVFParser(object):
ref_attr = "{{{schema}}}fileRef".format(schema=self.OVF_SCHEMA) ref_attr = "{{{schema}}}fileRef".format(schema=self.OVF_SCHEMA)
cap_attr = "{{{schema}}}capacity".format(schema=self.OVF_SCHEMA) cap_attr = "{{{schema}}}capacity".format(schema=self.OVF_SCHEMA)
cap_units = "{{{schema}}}capacityAllocationUnits".format(schema=self.OVF_SCHEMA) cap_units = "{{{schema}}}capacityAllocationUnits".format(schema=self.OVF_SCHEMA)
current_list = self.xml.findall(path) current_list = self.xml.findall(path)
results = [(x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units)) for x in current_list] results = [(x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units)) for x in current_list]
return results return results
def _getAttributes(self, virtual_system, path, attribute): def _getAttributes(self, virtual_system, path, attribute):
current_list = virtual_system.findall(path) current_list = virtual_system.findall(path)
results = [x.get(attribute) for x in current_list] results = [x.get(attribute) for x in current_list]
return results return results
@ -493,7 +487,7 @@ class OVFParser(object):
for item in hardware_list: for item in hardware_list:
try: try:
item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text] item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
except: except Exception:
continue continue
quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA)) quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA))
if quantity is None: if quantity is None:
@ -514,7 +508,7 @@ class OVFParser(object):
"{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=self.OVF_SCHEMA) "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=self.OVF_SCHEMA)
) )
disk_list = [] disk_list = []
for item in hardware_list: for item in hardware_list:
item_type = None item_type = None
@ -543,7 +537,7 @@ class OVFParser(object):
# Handle the unit conversion # Handle the unit conversion
base_unit, action, multiple = disk_capacity_unit.split() base_unit, action, multiple = disk_capacity_unit.split()
multiple_base, multiple_exponent = multiple.split('^') multiple_base, multiple_exponent = multiple.split('^')
disk_capacity = int(disk_capacity) * ( int(multiple_base) ** int(multiple_exponent) ) disk_capacity = int(disk_capacity) * (int(multiple_base) ** int(multiple_exponent))
# Append the disk with all details to the list # Append the disk with all details to the list
disk_list.append({ disk_list.append({

View File

@ -20,15 +20,10 @@
# #
############################################################################### ###############################################################################
import flask
import json import json
import psycopg2 import psycopg2
import psycopg2.extras import psycopg2.extras
import os
import re import re
import time
import shlex
import subprocess
from distutils.util import strtobool as dustrtobool from distutils.util import strtobool as dustrtobool
@ -42,6 +37,9 @@ import pvcapid.libvirt_schema as libvirt_schema
from pvcapid.ova import list_ova from pvcapid.ova import list_ova
config = None # Set in this namespace by flaskapi
def strtobool(stringv): def strtobool(stringv):
if stringv is None: if stringv is None:
return False return False
@ -49,9 +47,10 @@ def strtobool(stringv):
return bool(stringv) return bool(stringv)
try: try:
return bool(dustrtobool(stringv)) return bool(dustrtobool(stringv))
except: except Exception:
return False return False
# #
# Exceptions (used by Celery tasks) # Exceptions (used by Celery tasks)
# #
@ -61,18 +60,21 @@ class ValidationError(Exception):
""" """
pass pass
class ClusterError(Exception): class ClusterError(Exception):
""" """
An exception that results from the PVC cluster being out of alignment with the action. An exception that results from the PVC cluster being out of alignment with the action.
""" """
pass pass
class ProvisioningError(Exception): class ProvisioningError(Exception):
""" """
An exception that results from a failure of a provisioning command. An exception that results from a failure of a provisioning command.
""" """
pass pass
# #
# Common functions # Common functions
# #
@ -89,12 +91,14 @@ def open_database(config):
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
return conn, cur return conn, cur
def close_database(conn, cur, failed=False): def close_database(conn, cur, failed=False):
if not failed: if not failed:
conn.commit() conn.commit()
cur.close() cur.close()
conn.close() conn.close()
# #
# Template List functions # Template List functions
# #
@ -102,11 +106,11 @@ def list_template(limit, table, is_fuzzy=True):
if limit: if limit:
if is_fuzzy: if is_fuzzy:
# Handle fuzzy vs. non-fuzzy limits # Handle fuzzy vs. non-fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '%' + limit limit = '%' + limit
else: else:
limit = limit[1:] limit = limit[1:]
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '%' limit = limit + '%'
else: else:
limit = limit[:-1] limit = limit[:-1]
@ -122,7 +126,7 @@ def list_template(limit, table, is_fuzzy=True):
data = cur.fetchall() data = cur.fetchall()
if not isinstance(data, list): if not isinstance(data, list):
data = [ data ] data = [data]
if table == 'network_template': if table == 'network_template':
for template_id, template_data in enumerate(data): for template_id, template_data in enumerate(data):
@ -146,6 +150,7 @@ def list_template(limit, table, is_fuzzy=True):
return data return data
def list_template_system(limit, is_fuzzy=True): def list_template_system(limit, is_fuzzy=True):
""" """
Obtain a list of system templates. Obtain a list of system templates.
@ -154,7 +159,8 @@ def list_template_system(limit, is_fuzzy=True):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No system templates found.' }, 404 return {'message': 'No system templates found.'}, 404
def list_template_network(limit, is_fuzzy=True): def list_template_network(limit, is_fuzzy=True):
""" """
@ -164,7 +170,8 @@ def list_template_network(limit, is_fuzzy=True):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No network templates found.' }, 404 return {'message': 'No network templates found.'}, 404
def list_template_network_vnis(name): def list_template_network_vnis(name):
""" """
@ -175,7 +182,8 @@ def list_template_network_vnis(name):
if networks: if networks:
return networks, 200 return networks, 200
else: else:
return { 'message': 'No network template networks found.' }, 404 return {'message': 'No network template networks found.'}, 404
def list_template_storage(limit, is_fuzzy=True): def list_template_storage(limit, is_fuzzy=True):
""" """
@ -185,7 +193,8 @@ def list_template_storage(limit, is_fuzzy=True):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No storage templates found.' }, 404 return {'message': 'No storage templates found.'}, 404
def list_template_storage_disks(name): def list_template_storage_disks(name):
""" """
@ -196,7 +205,8 @@ def list_template_storage_disks(name):
if disks: if disks:
return disks, 200 return disks, 200
else: else:
return { 'message': 'No storage template disks found.' }, 404 return {'message': 'No storage template disks found.'}, 404
def template_list(limit): def template_list(limit):
system_templates, code = list_template_system(limit) system_templates, code = list_template_system(limit)
@ -209,14 +219,15 @@ def template_list(limit):
if code != 200: if code != 200:
storage_templates = [] storage_templates = []
return { "system_templates": system_templates, "network_templates": network_templates, "storage_templates": storage_templates } return {"system_templates": system_templates, "network_templates": network_templates, "storage_templates": storage_templates}
# #
# Template Create functions # Template Create functions
# #
def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None, node_limit=None, node_selector=None, node_autostart=False, migration_method=None, ova=None): def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None, node_limit=None, node_selector=None, node_autostart=False, migration_method=None, ova=None):
if list_template_system(name, is_fuzzy=False)[-1] != 404: if list_template_system(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The system template "{}" already exists.'.format(name) } retmsg = {'message': 'The system template "{}" already exists.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -226,17 +237,18 @@ def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, v
conn, cur = open_database(config) conn, cur = open_database(config)
try: try:
cur.execute(query, args) cur.execute(query, args)
retmsg = { 'message': 'Added new system template "{}".'.format(name) } retmsg = {'message': 'Added new system template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create system template "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to create system template "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def create_template_network(name, mac_template=None): def create_template_network(name, mac_template=None):
if list_template_network(name, is_fuzzy=False)[-1] != 404: if list_template_network(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The network template "{}" already exists.'.format(name) } retmsg = {'message': 'The network template "{}" already exists.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -245,17 +257,18 @@ def create_template_network(name, mac_template=None):
query = "INSERT INTO network_template (name, mac_template) VALUES (%s, %s);" query = "INSERT INTO network_template (name, mac_template) VALUES (%s, %s);"
args = (name, mac_template) args = (name, mac_template)
cur.execute(query, args) cur.execute(query, args)
retmsg = { 'message': 'Added new network template "{}".'.format(name) } retmsg = {'message': 'Added new network template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create network template "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to create network template "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def create_template_network_element(name, vni): def create_template_network_element(name, vni):
if list_template_network(name, is_fuzzy=False)[-1] != 200: if list_template_network(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The network template "{}" does not exist.'.format(name) } retmsg = {'message': 'The network template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -267,7 +280,7 @@ def create_template_network_element(name, vni):
if int(network['vni']) == int(vni): if int(network['vni']) == int(vni):
found_vni = True found_vni = True
if found_vni: if found_vni:
retmsg = { 'message': 'The VNI "{}" in network template "{}" already exists.'.format(vni, name) } retmsg = {'message': 'The VNI "{}" in network template "{}" already exists.'.format(vni, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -280,17 +293,18 @@ def create_template_network_element(name, vni):
query = "INSERT INTO network (network_template, vni) VALUES (%s, %s);" query = "INSERT INTO network (network_template, vni) VALUES (%s, %s);"
args = (template_id, vni) args = (template_id, vni)
cur.execute(query, args) cur.execute(query, args)
retmsg = { 'message': 'Added new network "{}" to network template "{}".'.format(vni, name) } retmsg = {'message': 'Added new network "{}" to network template "{}".'.format(vni, name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create entry "{}": {}'.format(vni, e) } retmsg = {'message': 'Failed to create entry "{}": {}'.format(vni, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def create_template_storage(name): def create_template_storage(name):
if list_template_storage(name, is_fuzzy=False)[-1] != 404: if list_template_storage(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The storage template "{}" already exists.'.format(name) } retmsg = {'message': 'The storage template "{}" already exists.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -299,17 +313,18 @@ def create_template_storage(name):
query = "INSERT INTO storage_template (name) VALUES (%s);" query = "INSERT INTO storage_template (name) VALUES (%s);"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { 'message': 'Added new storage template "{}".'.format(name) } retmsg = {'message': 'Added new storage template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to create entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def create_template_storage_element(name, disk_id, pool, source_volume=None, disk_size_gb=None, filesystem=None, filesystem_args=[], mountpoint=None): def create_template_storage_element(name, disk_id, pool, source_volume=None, disk_size_gb=None, filesystem=None, filesystem_args=[], mountpoint=None):
if list_template_storage(name, is_fuzzy=False)[-1] != 200: if list_template_storage(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The storage template "{}" does not exist.'.format(name) } retmsg = {'message': 'The storage template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -321,17 +336,17 @@ def create_template_storage_element(name, disk_id, pool, source_volume=None, dis
if disk['disk_id'] == disk_id: if disk['disk_id'] == disk_id:
found_disk = True found_disk = True
if found_disk: if found_disk:
retmsg = { 'message': 'The disk "{}" in storage template "{}" already exists.'.format(disk_id, name) } retmsg = {'message': 'The disk "{}" in storage template "{}" already exists.'.format(disk_id, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
if mountpoint and not filesystem: if mountpoint and not filesystem:
retmsg = { "message": "A filesystem must be specified along with a mountpoint." } retmsg = {"message": "A filesystem must be specified along with a mountpoint."}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
if source_volume and (disk_size_gb or filesystem or mountpoint): if source_volume and (disk_size_gb or filesystem or mountpoint):
retmsg = { "message": "Clone volumes are not compatible with disk size, filesystem, or mountpoint specifications." } retmsg = {"message": "Clone volumes are not compatible with disk size, filesystem, or mountpoint specifications."}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -348,20 +363,21 @@ def create_template_storage_element(name, disk_id, pool, source_volume=None, dis
fsargs = '' fsargs = ''
args = (template_id, pool, disk_id, source_volume, disk_size_gb, mountpoint, filesystem, fsargs) args = (template_id, pool, disk_id, source_volume, disk_size_gb, mountpoint, filesystem, fsargs)
cur.execute(query, args) cur.execute(query, args)
retmsg = { 'message': 'Added new disk "{}" to storage template "{}".'.format(disk_id, name) } retmsg = {'message': 'Added new disk "{}" to storage template "{}".'.format(disk_id, name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create entry "{}": {}'.format(disk_id, e) } retmsg = {'message': 'Failed to create entry "{}": {}'.format(disk_id, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
# #
# Template Modify functions # Template Modify functions
# #
def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc=None, vnc_bind=None, node_limit=None, node_selector=None, node_autostart=None, migration_method=None): def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc=None, vnc_bind=None, node_limit=None, node_selector=None, node_autostart=None, migration_method=None):
if list_template_system(name, is_fuzzy=False)[-1] != 200: if list_template_system(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The system template "{}" does not exist.'.format(name) } retmsg = {'message': 'The system template "{}" does not exist.'.format(name)}
retcode = 404 retcode = 404
return retmsg, retcode return retmsg, retcode
@ -370,8 +386,8 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
if vcpu_count is not None: if vcpu_count is not None:
try: try:
vcpu_count = int(vcpu_count) vcpu_count = int(vcpu_count)
except: except Exception:
retmsg = { 'message': 'The vcpus value must be an integer.' } retmsg = {'message': 'The vcpus value must be an integer.'}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'vcpu_count', 'data': vcpu_count}) fields.append({'field': 'vcpu_count', 'data': vcpu_count})
@ -379,8 +395,8 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
if vram_mb is not None: if vram_mb is not None:
try: try:
vram_mb = int(vram_mb) vram_mb = int(vram_mb)
except: except Exception:
retmsg = { 'message': 'The vram value must be an integer.' } retmsg = {'message': 'The vram value must be an integer.'}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'vram_mb', 'data': vram_mb}) fields.append({'field': 'vram_mb', 'data': vram_mb})
@ -388,8 +404,8 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
if serial is not None: if serial is not None:
try: try:
serial = bool(strtobool(serial)) serial = bool(strtobool(serial))
except: except Exception:
retmsg = { 'message': 'The serial value must be a boolean.' } retmsg = {'message': 'The serial value must be a boolean.'}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'serial', 'data': serial}) fields.append({'field': 'serial', 'data': serial})
@ -397,8 +413,8 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
if vnc is not None: if vnc is not None:
try: try:
vnc = bool(strtobool(vnc)) vnc = bool(strtobool(vnc))
except: except Exception:
retmsg = { 'message': 'The vnc value must be a boolean.' } retmsg = {'message': 'The vnc value must be a boolean.'}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'vnc', 'data': vnc}) fields.append({'field': 'vnc', 'data': vnc})
@ -415,8 +431,8 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
if node_autostart is not None: if node_autostart is not None:
try: try:
node_autostart = bool(strtobool(node_autostart)) node_autostart = bool(strtobool(node_autostart))
except: except Exception:
retmsg = { 'message': 'The node_autostart value must be a boolean.' } retmsg = {'message': 'The node_autostart value must be a boolean.'}
retcode = 400 retcode = 400
fields.append({'field': 'node_autostart', 'data': node_autostart}) fields.append({'field': 'node_autostart', 'data': node_autostart})
@ -429,20 +445,21 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
query = "UPDATE system_template SET {} = %s WHERE name = %s;".format(field.get('field')) query = "UPDATE system_template SET {} = %s WHERE name = %s;".format(field.get('field'))
args = (field.get('data'), name) args = (field.get('data'), name)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Modified system template "{}".'.format(name) } retmsg = {"message": 'Modified system template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to modify entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to modify entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
# #
# Template Delete functions # Template Delete functions
# #
def delete_template_system(name): def delete_template_system(name):
if list_template_system(name, is_fuzzy=False)[-1] != 200: if list_template_system(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The system template "{}" does not exist.'.format(name) } retmsg = {'message': 'The system template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -451,17 +468,18 @@ def delete_template_system(name):
query = "DELETE FROM system_template WHERE name = %s;" query = "DELETE FROM system_template WHERE name = %s;"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed system template "{}".'.format(name) } retmsg = {"message": 'Removed system template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_template_network(name): def delete_template_network(name):
if list_template_network(name, is_fuzzy=False)[-1] != 200: if list_template_network(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The network template "{}" does not exist.'.format(name) } retmsg = {'message': 'The network template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -477,17 +495,18 @@ def delete_template_network(name):
query = "DELETE FROM network_template WHERE name = %s;" query = "DELETE FROM network_template WHERE name = %s;"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed network template "{}".'.format(name) } retmsg = {"message": 'Removed network template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_template_network_element(name, vni): def delete_template_network_element(name, vni):
if list_template_network(name, is_fuzzy=False)[-1] != 200: if list_template_network(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The network template "{}" does not exist.'.format(name) } retmsg = {'message': 'The network template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -497,7 +516,7 @@ def delete_template_network_element(name, vni):
if network['vni'] == int(vni): if network['vni'] == int(vni):
found_vni = True found_vni = True
if not found_vni: if not found_vni:
retmsg = { 'message': 'The VNI "{}" in network template "{}" does not exist.'.format(vni, name) } retmsg = {'message': 'The VNI "{}" in network template "{}" does not exist.'.format(vni, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -510,17 +529,18 @@ def delete_template_network_element(name, vni):
query = "DELETE FROM network WHERE network_template = %s and vni = %s;" query = "DELETE FROM network WHERE network_template = %s and vni = %s;"
args = (template_id, vni) args = (template_id, vni)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed network "{}" from network template "{}".'.format(vni, name) } retmsg = {"message": 'Removed network "{}" from network template "{}".'.format(vni, name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_template_storage(name): def delete_template_storage(name):
if list_template_storage(name, is_fuzzy=False)[-1] != 200: if list_template_storage(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The storage template "{}" does not exist.'.format(name) } retmsg = {'message': 'The storage template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -536,17 +556,18 @@ def delete_template_storage(name):
query = "DELETE FROM storage_template WHERE name = %s;" query = "DELETE FROM storage_template WHERE name = %s;"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed storage template "{}".'.format(name) } retmsg = {"message": 'Removed storage template "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_template_storage_element(name, disk_id): def delete_template_storage_element(name, disk_id):
if list_template_storage(name, is_fuzzy=False)[-1] != 200: if list_template_storage(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The storage template "{}" does not exist.'.format(name) } retmsg = {'message': 'The storage template "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -556,7 +577,7 @@ def delete_template_storage_element(name, disk_id):
if disk['disk_id'] == disk_id: if disk['disk_id'] == disk_id:
found_disk = True found_disk = True
if not found_disk: if not found_disk:
retmsg = { 'message': 'The disk "{}" in storage template "{}" does not exist.'.format(disk_id, name) } retmsg = {'message': 'The disk "{}" in storage template "{}" does not exist.'.format(disk_id, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -569,14 +590,15 @@ def delete_template_storage_element(name, disk_id):
query = "DELETE FROM storage WHERE storage_template = %s and disk_id = %s;" query = "DELETE FROM storage WHERE storage_template = %s and disk_id = %s;"
args = (template_id, disk_id) args = (template_id, disk_id)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed disk "{}" from storage template "{}".'.format(disk_id, name) } retmsg = {"message": 'Removed disk "{}" from storage template "{}".'.format(disk_id, name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
# #
# Userdata functions # Userdata functions
# #
@ -584,11 +606,11 @@ def list_userdata(limit, is_fuzzy=True):
if limit: if limit:
if is_fuzzy: if is_fuzzy:
# Handle fuzzy vs. non-fuzzy limits # Handle fuzzy vs. non-fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '%' + limit limit = '%' + limit
else: else:
limit = limit[1:] limit = limit[1:]
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '%' limit = limit + '%'
else: else:
limit = limit[:-1] limit = limit[:-1]
@ -606,11 +628,12 @@ def list_userdata(limit, is_fuzzy=True):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No userdata documents found.' }, 404 return {'message': 'No userdata documents found.'}, 404
def create_userdata(name, userdata): def create_userdata(name, userdata):
if list_userdata(name, is_fuzzy=False)[-1] != 404: if list_userdata(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The userdata document "{}" already exists.'.format(name) } retmsg = {'message': 'The userdata document "{}" already exists.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -619,17 +642,18 @@ def create_userdata(name, userdata):
query = "INSERT INTO userdata (name, userdata) VALUES (%s, %s);" query = "INSERT INTO userdata (name, userdata) VALUES (%s, %s);"
args = (name, userdata) args = (name, userdata)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Created userdata document "{}".'.format(name) } retmsg = {"message": 'Created userdata document "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to create entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def update_userdata(name, userdata): def update_userdata(name, userdata):
if list_userdata(name, is_fuzzy=False)[-1] != 200: if list_userdata(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The userdata "{}" does not exist.'.format(name) } retmsg = {'message': 'The userdata "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -641,17 +665,18 @@ def update_userdata(name, userdata):
query = "UPDATE userdata SET userdata = %s WHERE id = %s;" query = "UPDATE userdata SET userdata = %s WHERE id = %s;"
args = (userdata, tid) args = (userdata, tid)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Updated userdata document "{}".'.format(name) } retmsg = {"message": 'Updated userdata document "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to update entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to update entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_userdata(name): def delete_userdata(name):
if list_userdata(name, is_fuzzy=False)[-1] != 200: if list_userdata(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The userdata "{}" does not exist.'.format(name) } retmsg = {'message': 'The userdata "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -660,14 +685,15 @@ def delete_userdata(name):
query = "DELETE FROM userdata WHERE name = %s;" query = "DELETE FROM userdata WHERE name = %s;"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed userdata document "{}".'.format(name) } retmsg = {"message": 'Removed userdata document "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
# #
# Script functions # Script functions
# #
@ -675,11 +701,11 @@ def list_script(limit, is_fuzzy=True):
if limit: if limit:
if is_fuzzy: if is_fuzzy:
# Handle fuzzy vs. non-fuzzy limits # Handle fuzzy vs. non-fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '%' + limit limit = '%' + limit
else: else:
limit = limit[1:] limit = limit[1:]
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '%' limit = limit + '%'
else: else:
limit = limit[:-1] limit = limit[:-1]
@ -697,11 +723,12 @@ def list_script(limit, is_fuzzy=True):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No scripts found.' }, 404 return {'message': 'No scripts found.'}, 404
def create_script(name, script): def create_script(name, script):
if list_script(name, is_fuzzy=False)[-1] != 404: if list_script(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The script "{}" already exists.'.format(name) } retmsg = {'message': 'The script "{}" already exists.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -710,17 +737,18 @@ def create_script(name, script):
query = "INSERT INTO script (name, script) VALUES (%s, %s);" query = "INSERT INTO script (name, script) VALUES (%s, %s);"
args = (name, script) args = (name, script)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Created provisioning script "{}".'.format(name) } retmsg = {"message": 'Created provisioning script "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to create entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def update_script(name, script): def update_script(name, script):
if list_script(name, is_fuzzy=False)[-1] != 200: if list_script(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The script "{}" does not exist.'.format(name) } retmsg = {'message': 'The script "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -732,17 +760,18 @@ def update_script(name, script):
query = "UPDATE script SET script = %s WHERE id = %s;" query = "UPDATE script SET script = %s WHERE id = %s;"
args = (script, tid) args = (script, tid)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Updated provisioning script "{}".'.format(name) } retmsg = {"message": 'Updated provisioning script "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to update entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to update entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_script(name): def delete_script(name):
if list_script(name, is_fuzzy=False)[-1] != 200: if list_script(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The script "{}" does not exist.'.format(name) } retmsg = {'message': 'The script "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -751,14 +780,15 @@ def delete_script(name):
query = "DELETE FROM script WHERE name = %s;" query = "DELETE FROM script WHERE name = %s;"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed provisioning script "{}".'.format(name) } retmsg = {"message": 'Removed provisioning script "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
# #
# Profile functions # Profile functions
# #
@ -766,11 +796,11 @@ def list_profile(limit, is_fuzzy=True):
if limit: if limit:
if is_fuzzy: if is_fuzzy:
# Handle fuzzy vs. non-fuzzy limits # Handle fuzzy vs. non-fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '%' + limit limit = '%' + limit
else: else:
limit = limit[1:] limit = limit[1:]
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '%' limit = limit + '%'
else: else:
limit = limit[:-1] limit = limit[:-1]
@ -797,7 +827,7 @@ def list_profile(limit, is_fuzzy=True):
cur.execute(query, args) cur.execute(query, args)
try: try:
name = cur.fetchone()['name'] name = cur.fetchone()['name']
except Exception as e: except Exception:
name = "N/A" name = "N/A"
profile_data[etype] = name profile_data[etype] = name
# Split the arguments back into a list # Split the arguments back into a list
@ -808,16 +838,17 @@ def list_profile(limit, is_fuzzy=True):
if data: if data:
return data, 200 return data, 200
else: else:
return { 'message': 'No profiles found.' }, 404 return {'message': 'No profiles found.'}, 404
def create_profile(name, profile_type, system_template, network_template, storage_template, userdata=None, script=None, ova=None, arguments=None): def create_profile(name, profile_type, system_template, network_template, storage_template, userdata=None, script=None, ova=None, arguments=None):
if list_profile(name, is_fuzzy=False)[-1] != 404: if list_profile(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The profile "{}" already exists.'.format(name) } retmsg = {'message': 'The profile "{}" already exists.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
if profile_type not in ['provisioner', 'ova']: if profile_type not in ['provisioner', 'ova']:
retmsg = { 'message': 'A valid profile type (provisioner, ova) must be specified.' } retmsg = {'message': 'A valid profile type (provisioner, ova) must be specified.'}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -829,7 +860,7 @@ def create_profile(name, profile_type, system_template, network_template, storag
if template['name'] == system_template: if template['name'] == system_template:
system_template_id = template['id'] system_template_id = template['id']
if not system_template_id: if not system_template_id:
retmsg = { 'message': 'The system template "{}" for profile "{}" does not exist.'.format(system_template, name) } retmsg = {'message': 'The system template "{}" for profile "{}" does not exist.'.format(system_template, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -841,7 +872,7 @@ def create_profile(name, profile_type, system_template, network_template, storag
if template['name'] == network_template: if template['name'] == network_template:
network_template_id = template['id'] network_template_id = template['id']
if not network_template_id and profile_type != 'ova': if not network_template_id and profile_type != 'ova':
retmsg = { 'message': 'The network template "{}" for profile "{}" does not exist.'.format(network_template, name) } retmsg = {'message': 'The network template "{}" for profile "{}" does not exist.'.format(network_template, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -853,7 +884,7 @@ def create_profile(name, profile_type, system_template, network_template, storag
if template['name'] == storage_template: if template['name'] == storage_template:
storage_template_id = template['id'] storage_template_id = template['id']
if not storage_template_id and profile_type != 'ova': if not storage_template_id and profile_type != 'ova':
retmsg = { 'message': 'The storage template "{}" for profile "{}" does not exist.'.format(storage_template, name) } retmsg = {'message': 'The storage template "{}" for profile "{}" does not exist.'.format(storage_template, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -891,17 +922,18 @@ def create_profile(name, profile_type, system_template, network_template, storag
query = "INSERT INTO profile (name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);" query = "INSERT INTO profile (name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
args = (name, profile_type, system_template_id, network_template_id, storage_template_id, userdata_id, script_id, ova_id, arguments_formatted) args = (name, profile_type, system_template_id, network_template_id, storage_template_id, userdata_id, script_id, ova_id, arguments_formatted)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Created VM profile "{}".'.format(name) } retmsg = {"message": 'Created VM profile "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to create entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to create entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def modify_profile(name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments=None): def modify_profile(name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments=None):
if list_profile(name, is_fuzzy=False)[-1] != 200: if list_profile(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The profile "{}" does not exist.'.format(name) } retmsg = {'message': 'The profile "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -909,7 +941,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if profile_type is not None: if profile_type is not None:
if profile_type not in ['provisioner', 'ova']: if profile_type not in ['provisioner', 'ova']:
retmsg = { 'message': 'A valid profile type (provisioner, ova) must be specified.' } retmsg = {'message': 'A valid profile type (provisioner, ova) must be specified.'}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'type', 'data': profile_type}) fields.append({'field': 'type', 'data': profile_type})
@ -921,7 +953,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if template['name'] == system_template: if template['name'] == system_template:
system_template_id = template['id'] system_template_id = template['id']
if not system_template_id: if not system_template_id:
retmsg = { 'message': 'The system template "{}" for profile "{}" does not exist.'.format(system_template, name) } retmsg = {'message': 'The system template "{}" for profile "{}" does not exist.'.format(system_template, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'system_template', 'data': system_template_id}) fields.append({'field': 'system_template', 'data': system_template_id})
@ -933,7 +965,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if template['name'] == network_template: if template['name'] == network_template:
network_template_id = template['id'] network_template_id = template['id']
if not network_template_id: if not network_template_id:
retmsg = { 'message': 'The network template "{}" for profile "{}" does not exist.'.format(network_template, name) } retmsg = {'message': 'The network template "{}" for profile "{}" does not exist.'.format(network_template, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'network_template', 'data': network_template_id}) fields.append({'field': 'network_template', 'data': network_template_id})
@ -945,7 +977,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if template['name'] == storage_template: if template['name'] == storage_template:
storage_template_id = template['id'] storage_template_id = template['id']
if not storage_template_id: if not storage_template_id:
retmsg = { 'message': 'The storage template "{}" for profile "{}" does not exist.'.format(storage_template, name) } retmsg = {'message': 'The storage template "{}" for profile "{}" does not exist.'.format(storage_template, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'storage_template', 'data': storage_template_id}) fields.append({'field': 'storage_template', 'data': storage_template_id})
@ -957,7 +989,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if template['name'] == userdata: if template['name'] == userdata:
userdata_id = template['id'] userdata_id = template['id']
if not userdata_id: if not userdata_id:
retmsg = { 'message': 'The userdata template "{}" for profile "{}" does not exist.'.format(userdata, name) } retmsg = {'message': 'The userdata template "{}" for profile "{}" does not exist.'.format(userdata, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'userdata', 'data': userdata_id}) fields.append({'field': 'userdata', 'data': userdata_id})
@ -969,7 +1001,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if scr['name'] == script: if scr['name'] == script:
script_id = scr['id'] script_id = scr['id']
if not script_id: if not script_id:
retmsg = { 'message': 'The script "{}" for profile "{}" does not exist.'.format(script, name) } retmsg = {'message': 'The script "{}" for profile "{}" does not exist.'.format(script, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'script', 'data': script_id}) fields.append({'field': 'script', 'data': script_id})
@ -981,7 +1013,7 @@ def modify_profile(name, profile_type, system_template, network_template, storag
if ov['name'] == ova: if ov['name'] == ova:
ova_id = ov['id'] ova_id = ov['id']
if not ova_id: if not ova_id:
retmsg = { 'message': 'The OVA "{}" for profile "{}" does not exist.'.format(ova, name) } retmsg = {'message': 'The OVA "{}" for profile "{}" does not exist.'.format(ova, name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
fields.append({'field': 'ova', 'data': ova_id}) fields.append({'field': 'ova', 'data': ova_id})
@ -999,17 +1031,18 @@ def modify_profile(name, profile_type, system_template, network_template, storag
query = "UPDATE profile SET {}=%s WHERE name=%s;".format(field.get('field')) query = "UPDATE profile SET {}=%s WHERE name=%s;".format(field.get('field'))
args = (field.get('data'), name) args = (field.get('data'), name)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Modified VM profile "{}".'.format(name) } retmsg = {"message": 'Modified VM profile "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to modify entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to modify entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
def delete_profile(name): def delete_profile(name):
if list_profile(name, is_fuzzy=False)[-1] != 200: if list_profile(name, is_fuzzy=False)[-1] != 200:
retmsg = { 'message': 'The profile "{}" does not exist.'.format(name) } retmsg = {'message': 'The profile "{}" does not exist.'.format(name)}
retcode = 400 retcode = 400
return retmsg, retcode return retmsg, retcode
@ -1018,14 +1051,15 @@ def delete_profile(name):
query = "DELETE FROM profile WHERE name = %s;" query = "DELETE FROM profile WHERE name = %s;"
args = (name,) args = (name,)
cur.execute(query, args) cur.execute(query, args)
retmsg = { "message": 'Removed VM profile "{}".'.format(name) } retmsg = {"message": 'Removed VM profile "{}".'.format(name)}
retcode = 200 retcode = 200
except Exception as e: except Exception as e:
retmsg = { 'message': 'Failed to delete entry "{}": {}'.format(name, e) } retmsg = {'message': 'Failed to delete entry "{}": {}'.format(name, e)}
retcode = 400 retcode = 400
close_database(conn, cur) close_database(conn, cur)
return retmsg, retcode return retmsg, retcode
# #
# Main VM provisioning function - executed by the Celery worker # Main VM provisioning function - executed by the Celery worker
# #
@ -1044,13 +1078,13 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
# Phase 0 - connect to databases # Phase 0 - connect to databases
try: try:
db_conn, db_cur = open_database(config) db_conn, db_cur = open_database(config)
except: except Exception:
print('FATAL - failed to connect to Postgres') print('FATAL - failed to connect to Postgres')
raise Exception raise Exception
try: try:
zk_conn = pvc_common.startZKConnection(config['coordinators']) zk_conn = pvc_common.startZKConnection(config['coordinators'])
except: except Exception:
print('FATAL - failed to connect to Zookeeper') print('FATAL - failed to connect to Zookeeper')
raise Exception raise Exception
@ -1060,13 +1094,13 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
# * Assemble a VM configuration dictionary # * Assemble a VM configuration dictionary
self.update_state(state='RUNNING', meta={'current': 1, 'total': 10, 'status': 'Collecting configuration'}) self.update_state(state='RUNNING', meta={'current': 1, 'total': 10, 'status': 'Collecting configuration'})
time.sleep(1) time.sleep(1)
vm_id = re.findall(r'/(\d+)$/', vm_name) vm_id = re.findall(r'/(\d+)$/', vm_name)
if not vm_id: if not vm_id:
vm_id = 0 vm_id = 0
else: else:
vm_id = vm_id[0] vm_id = vm_id[0]
vm_data = dict() vm_data = dict()
# Get the profile information # Get the profile information
@ -1078,10 +1112,10 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
vm_data['script_arguments'] = profile_data.get('arguments').split('|') vm_data['script_arguments'] = profile_data.get('arguments').split('|')
else: else:
vm_data['script_arguments'] = [] vm_data['script_arguments'] = []
if profile_data.get('profile_type') == 'ova': if profile_data.get('profile_type') == 'ova':
is_ova_install = True is_ova_install = True
is_script_install = False # By definition is_script_install = False # By definition
else: else:
is_ova_install = False is_ova_install = False
@ -1163,7 +1197,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
last_free = 0 last_free = 0
for node in nodes: for node in nodes:
# Skip the node if it is not ready to run VMs # Skip the node if it is not ready to run VMs
if node ['daemon_state'] != "run" or node['domain_state'] != "ready": if node['daemon_state'] != "run" or node['domain_state'] != "ready":
continue continue
# Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer # Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer
if node['memory']['free'] < (vm_data['system_details']['vram_mb'] + 512): if node['memory']['free'] < (vm_data['system_details']['vram_mb'] + 512):
@ -1182,7 +1216,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
cluster_networks, _discard = pvc_network.getClusterNetworkList(zk_conn) cluster_networks, _discard = pvc_network.getClusterNetworkList(zk_conn)
for network in vm_data['networks']: for network in vm_data['networks']:
vni = str(network['vni']) vni = str(network['vni'])
if not vni in cluster_networks: if vni not in cluster_networks:
raise ClusterError('The network VNI "{}" is not present on the cluster.'.format(vni)) raise ClusterError('The network VNI "{}" is not present on the cluster.'.format(vni))
print("All configured networks for VM are valid") print("All configured networks for VM are valid")
@ -1212,7 +1246,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
pool_information = pvc_ceph.getPoolInformation(zk_conn, pool) pool_information = pvc_ceph.getPoolInformation(zk_conn, pool)
if not pool_information: if not pool_information:
raise raise
except: except Exception:
raise ClusterError('Pool "{}" is not present on the cluster.'.format(pool)) raise ClusterError('Pool "{}" is not present on the cluster.'.format(pool))
pool_free_space_gb = int(pool_information['stats']['free_bytes'] / 1024 / 1024 / 1024) pool_free_space_gb = int(pool_information['stats']['free_bytes'] / 1024 / 1024 / 1024)
pool_vm_usage_gb = int(pools[pool]) pool_vm_usage_gb = int(pools[pool])
@ -1230,7 +1264,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
continue continue
if volume['filesystem'] and volume['filesystem'] not in used_filesystems: if volume['filesystem'] and volume['filesystem'] not in used_filesystems:
used_filesystems.append(volume['filesystem']) used_filesystems.append(volume['filesystem'])
for filesystem in used_filesystems: for filesystem in used_filesystems:
if filesystem == 'swap': if filesystem == 'swap':
retcode, stdout, stderr = pvc_common.run_os_command("which mkswap") retcode, stdout, stderr = pvc_common.run_os_command("which mkswap")
@ -1266,7 +1300,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
loader.exec_module(installer_script) loader.exec_module(installer_script)
# Verify that the install() function is valid # Verify that the install() function is valid
if not "install" in dir(installer_script): if "install" not in dir(installer_script):
raise ProvisioningError("Specified script does not contain an install() function.") raise ProvisioningError("Specified script does not contain an install() function.")
print("Provisioning script imported successfully") print("Provisioning script imported successfully")
@ -1316,9 +1350,9 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
) )
else: else:
mac_prefix = '52:54:00' mac_prefix = '52:54:00'
random_octet_A = '{:x}'.format(random.randint(16,238)) random_octet_A = '{:x}'.format(random.randint(16, 238))
random_octet_B = '{:x}'.format(random.randint(16,238)) random_octet_B = '{:x}'.format(random.randint(16, 238))
random_octet_C = '{:x}'.format(random.randint(16,238)) random_octet_C = '{:x}'.format(random.randint(16, 238))
macgen_template = '{prefix}:{octetA}:{octetB}:{octetC}' macgen_template = '{prefix}:{octetA}:{octetB}:{octetC}'
eth_macaddr = macgen_template.format( eth_macaddr = macgen_template.format(
@ -1416,7 +1450,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
# * Create each Ceph storage volume for the disks # * Create each Ceph storage volume for the disks
self.update_state(state='RUNNING', meta={'current': 6, 'total': 10, 'status': 'Creating storage volumes'}) self.update_state(state='RUNNING', meta={'current': 6, 'total': 10, 'status': 'Creating storage volumes'})
time.sleep(1) time.sleep(1)
for volume in vm_data['volumes']: for volume in vm_data['volumes']:
if volume.get('source_volume') is not None: if volume.get('source_volume') is not None:
success, message = pvc_ceph.clone_volume(zk_conn, volume['pool'], "{}_{}".format(vm_name, volume['disk_id']), volume['source_volume']) success, message = pvc_ceph.clone_volume(zk_conn, volume['pool'], "{}_{}".format(vm_name, volume['disk_id']), volume['source_volume'])
@ -1477,7 +1511,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
else: else:
if volume.get('source_volume') is not None: if volume.get('source_volume') is not None:
continue continue
if volume.get('filesystem') is None: if volume.get('filesystem') is None:
continue continue
@ -1634,4 +1668,3 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
pvc_common.stopZKConnection(zk_conn) pvc_common.stopZKConnection(zk_conn)
return {'status': 'VM "{}" with profile "{}" has been provisioned and started successfully'.format(vm_name, vm_profile), 'current': 10, 'total': 10} return {'status': 'VM "{}" with profile "{}" has been provisioned and started successfully'.format(vm_name, vm_profile), 'current': 10, 'total': 10}

View File

@ -22,24 +22,40 @@
import datetime import datetime
# ANSII colours for output # ANSII colours for output
def red(): def red():
return '\033[91m' return '\033[91m'
def blue(): def blue():
return '\033[94m' return '\033[94m'
def cyan(): def cyan():
return '\033[96m' return '\033[96m'
def green(): def green():
return '\033[92m' return '\033[92m'
def yellow(): def yellow():
return '\033[93m' return '\033[93m'
def purple(): def purple():
return '\033[95m' return '\033[95m'
def bold(): def bold():
return '\033[1m' return '\033[1m'
def end(): def end():
return '\033[0m' return '\033[0m'
# Print function # Print function
def echo(message, prefix, state): def echo(message, prefix, state):
# Get the date # Get the date

View File

@ -20,9 +20,7 @@
# #
############################################################################### ###############################################################################
import re
import json import json
import time
import math import math
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
@ -34,15 +32,28 @@ from cli_lib.common import UploadProgressBar, call_api
# Supplemental functions # Supplemental functions
# #
# Format byte sizes to/from human-readable units # Matrix of human-to-byte values
byte_unit_matrix = { byte_unit_matrix = {
'B': 1, 'B': 1,
'K': 1024, 'K': 1024,
'M': 1024*1024, 'M': 1024 * 1024,
'G': 1024*1024*1024, 'G': 1024 * 1024 * 1024,
'T': 1024*1024*1024*1024, 'T': 1024 * 1024 * 1024 * 1024,
'P': 1024*1024*1024*1024*1024 'P': 1024 * 1024 * 1024 * 1024 * 1024
} }
# Matrix of human-to-metric values
ops_unit_matrix = {
'': 1,
'K': 1000,
'M': 1000 * 1000,
'G': 1000 * 1000 * 1000,
'T': 1000 * 1000 * 1000 * 1000,
'P': 1000 * 1000 * 1000 * 1000 * 1000
}
# Format byte sizes to/from human-readable units
def format_bytes_tohuman(databytes): def format_bytes_tohuman(databytes):
datahuman = '' datahuman = ''
for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get, reverse=True): for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get, reverse=True):
@ -57,6 +68,7 @@ def format_bytes_tohuman(databytes):
return datahuman return datahuman
def format_bytes_fromhuman(datahuman): def format_bytes_fromhuman(datahuman):
# Trim off human-readable character # Trim off human-readable character
dataunit = datahuman[-1] dataunit = datahuman[-1]
@ -64,15 +76,8 @@ def format_bytes_fromhuman(datahuman):
databytes = datasize * byte_unit_matrix[dataunit] databytes = datasize * byte_unit_matrix[dataunit]
return '{}B'.format(databytes) return '{}B'.format(databytes)
# Format ops sizes to/from human-readable units # Format ops sizes to/from human-readable units
ops_unit_matrix = {
'': 1,
'K': 1000,
'M': 1000*1000,
'G': 1000*1000*1000,
'T': 1000*1000*1000*1000,
'P': 1000*1000*1000*1000*1000
}
def format_ops_tohuman(dataops): def format_ops_tohuman(dataops):
datahuman = '' datahuman = ''
for unit in sorted(ops_unit_matrix, key=ops_unit_matrix.get, reverse=True): for unit in sorted(ops_unit_matrix, key=ops_unit_matrix.get, reverse=True):
@ -87,6 +92,7 @@ def format_ops_tohuman(dataops):
return datahuman return datahuman
def format_ops_fromhuman(datahuman): def format_ops_fromhuman(datahuman):
# Trim off human-readable character # Trim off human-readable character
dataunit = datahuman[-1] dataunit = datahuman[-1]
@ -94,10 +100,12 @@ def format_ops_fromhuman(datahuman):
dataops = datasize * ops_unit_matrix[dataunit] dataops = datasize * ops_unit_matrix[dataunit]
return '{}'.format(dataops) return '{}'.format(dataops)
def format_pct_tohuman(datapct): def format_pct_tohuman(datapct):
datahuman = "{0:.1f}".format(float(datapct * 100.0)) datahuman = "{0:.1f}".format(float(datapct * 100.0))
return datahuman return datahuman
# #
# Status functions # Status functions
# #
@ -115,7 +123,8 @@ def ceph_status(config):
return True, response.json() return True, response.json()
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_util(config): def ceph_util(config):
""" """
Get utilization of the Ceph cluster Get utilization of the Ceph cluster
@ -130,7 +139,8 @@ def ceph_util(config):
return True, response.json() return True, response.json()
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def format_raw_output(status_data): def format_raw_output(status_data):
ainformation = list() ainformation = list()
ainformation.append('{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n'.format(bold=ansiprint.bold(), end=ansiprint.end(), blue=ansiprint.blue(), stype=status_data['type'], primary=status_data['primary_node'])) ainformation.append('{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n'.format(bold=ansiprint.bold(), end=ansiprint.end(), blue=ansiprint.blue(), stype=status_data['type'], primary=status_data['primary_node']))
@ -139,6 +149,7 @@ def format_raw_output(status_data):
return '\n'.join(ainformation) return '\n'.join(ainformation)
# #
# OSD functions # OSD functions
# #
@ -157,6 +168,7 @@ def ceph_osd_info(config, osd):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_osd_list(config, limit): def ceph_osd_list(config, limit):
""" """
Get list information about Ceph OSDs (limited by {limit}) Get list information about Ceph OSDs (limited by {limit})
@ -176,6 +188,7 @@ def ceph_osd_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_osd_add(config, node, device, weight): def ceph_osd_add(config, node, device, weight):
""" """
Add new Ceph OSD Add new Ceph OSD
@ -198,6 +211,7 @@ def ceph_osd_add(config, node, device, weight):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_osd_remove(config, osdid): def ceph_osd_remove(config, osdid):
""" """
Remove Ceph OSD Remove Ceph OSD
@ -218,6 +232,7 @@ def ceph_osd_remove(config, osdid):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_osd_state(config, osdid, state): def ceph_osd_state(config, osdid, state):
""" """
Set state of Ceph OSD Set state of Ceph OSD
@ -238,6 +253,7 @@ def ceph_osd_state(config, osdid, state):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_osd_option(config, option, action): def ceph_osd_option(config, option, action):
""" """
Set cluster option of Ceph OSDs Set cluster option of Ceph OSDs
@ -259,6 +275,7 @@ def ceph_osd_option(config, option, action):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def getOutputColoursOSD(osd_information): def getOutputColoursOSD(osd_information):
# Set the UP status # Set the UP status
if osd_information['stats']['up'] == 1: if osd_information['stats']['up'] == 1:
@ -278,13 +295,14 @@ def getOutputColoursOSD(osd_information):
return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour
def format_list_osd(osd_list): def format_list_osd(osd_list):
# Handle empty list # Handle empty list
if not osd_list: if not osd_list:
osd_list = list() osd_list = list()
# Handle single-item list # Handle single-item list
if not isinstance(osd_list, list): if not isinstance(osd_list, list):
osd_list = [ osd_list ] osd_list = [osd_list]
osd_list_output = [] osd_list_output = []
@ -414,44 +432,43 @@ Rd: {osd_rdops: <{osd_rdops_length}} \
Wr: {osd_wrops: <{osd_wrops_length}} \ Wr: {osd_wrops: <{osd_wrops_length}} \
{osd_wrdata: <{osd_wrdata_length}} \ {osd_wrdata: <{osd_wrdata_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
osd_id_length=osd_id_length, osd_id_length=osd_id_length,
osd_node_length=osd_node_length, osd_node_length=osd_node_length,
osd_up_length=osd_up_length, osd_up_length=osd_up_length,
osd_in_length=osd_in_length, osd_in_length=osd_in_length,
osd_size_length=osd_size_length, osd_size_length=osd_size_length,
osd_pgs_length=osd_pgs_length, osd_pgs_length=osd_pgs_length,
osd_weight_length=osd_weight_length, osd_weight_length=osd_weight_length,
osd_reweight_length=osd_reweight_length, osd_reweight_length=osd_reweight_length,
osd_used_length=osd_used_length, osd_used_length=osd_used_length,
osd_free_length=osd_free_length, osd_free_length=osd_free_length,
osd_util_length=osd_util_length, osd_util_length=osd_util_length,
osd_var_length=osd_var_length, osd_var_length=osd_var_length,
osd_wrops_length=osd_wrops_length, osd_wrops_length=osd_wrops_length,
osd_wrdata_length=osd_wrdata_length, osd_wrdata_length=osd_wrdata_length,
osd_rdops_length=osd_rdops_length, osd_rdops_length=osd_rdops_length,
osd_rddata_length=osd_rddata_length, osd_rddata_length=osd_rddata_length,
osd_id='ID', osd_id='ID',
osd_node='Node', osd_node='Node',
osd_up='Up', osd_up='Up',
osd_in='In', osd_in='In',
osd_size='Size', osd_size='Size',
osd_pgs='PGs', osd_pgs='PGs',
osd_weight='Wt', osd_weight='Wt',
osd_reweight='ReWt', osd_reweight='ReWt',
osd_used='Used', osd_used='Used',
osd_free='Free', osd_free='Free',
osd_util='Util%', osd_util='Util%',
osd_var='Var', osd_var='Var',
osd_wrops='OPS', osd_wrops='OPS',
osd_wrdata='Data', osd_wrdata='Data',
osd_rdops='OPS', osd_rdops='OPS',
osd_rddata='Data' osd_rddata='Data')
)
) )
for osd_information in sorted(osd_list, key = lambda x: int(x['id'])): for osd_information in sorted(osd_list, key=lambda x: int(x['id'])):
try: try:
# If this happens, the node hasn't checked in fully yet, so just ignore it # If this happens, the node hasn't checked in fully yet, so just ignore it
if osd_information['stats']['node'] == '|': if osd_information['stats']['node'] == '|':
@ -482,44 +499,43 @@ Wr: {osd_wrops: <{osd_wrops_length}} \
{osd_wrops: <{osd_wrops_length}} \ {osd_wrops: <{osd_wrops_length}} \
{osd_wrdata: <{osd_wrdata_length}} \ {osd_wrdata: <{osd_wrdata_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
end_colour=ansiprint.end(), end_colour=ansiprint.end(),
osd_id_length=osd_id_length, osd_id_length=osd_id_length,
osd_node_length=osd_node_length, osd_node_length=osd_node_length,
osd_up_length=osd_up_length, osd_up_length=osd_up_length,
osd_in_length=osd_in_length, osd_in_length=osd_in_length,
osd_size_length=osd_size_length, osd_size_length=osd_size_length,
osd_pgs_length=osd_pgs_length, osd_pgs_length=osd_pgs_length,
osd_weight_length=osd_weight_length, osd_weight_length=osd_weight_length,
osd_reweight_length=osd_reweight_length, osd_reweight_length=osd_reweight_length,
osd_used_length=osd_used_length, osd_used_length=osd_used_length,
osd_free_length=osd_free_length, osd_free_length=osd_free_length,
osd_util_length=osd_util_length, osd_util_length=osd_util_length,
osd_var_length=osd_var_length, osd_var_length=osd_var_length,
osd_wrops_length=osd_wrops_length, osd_wrops_length=osd_wrops_length,
osd_wrdata_length=osd_wrdata_length, osd_wrdata_length=osd_wrdata_length,
osd_rdops_length=osd_rdops_length, osd_rdops_length=osd_rdops_length,
osd_rddata_length=osd_rddata_length, osd_rddata_length=osd_rddata_length,
osd_id=osd_information['id'], osd_id=osd_information['id'],
osd_node=osd_information['stats']['node'], osd_node=osd_information['stats']['node'],
osd_up_colour=osd_up_colour, osd_up_colour=osd_up_colour,
osd_up=osd_up_flag, osd_up=osd_up_flag,
osd_in_colour=osd_in_colour, osd_in_colour=osd_in_colour,
osd_in=osd_in_flag, osd_in=osd_in_flag,
osd_size=osd_information['stats']['size'], osd_size=osd_information['stats']['size'],
osd_pgs=osd_information['stats']['pgs'], osd_pgs=osd_information['stats']['pgs'],
osd_weight=osd_information['stats']['weight'], osd_weight=osd_information['stats']['weight'],
osd_reweight=osd_information['stats']['reweight'], osd_reweight=osd_information['stats']['reweight'],
osd_used=osd_information['stats']['used'], osd_used=osd_information['stats']['used'],
osd_free=osd_information['stats']['avail'], osd_free=osd_information['stats']['avail'],
osd_util=osd_util, osd_util=osd_util,
osd_var=osd_var, osd_var=osd_var,
osd_wrops=osd_information['stats']['wr_ops'], osd_wrops=osd_information['stats']['wr_ops'],
osd_wrdata=osd_information['stats']['wr_data'], osd_wrdata=osd_information['stats']['wr_data'],
osd_rdops=osd_information['stats']['rd_ops'], osd_rdops=osd_information['stats']['rd_ops'],
osd_rddata=osd_information['stats']['rd_data'] osd_rddata=osd_information['stats']['rd_data'])
)
) )
return '\n'.join(osd_list_output) return '\n'.join(osd_list_output)
@ -543,6 +559,7 @@ def ceph_pool_info(config, pool):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_pool_list(config, limit): def ceph_pool_list(config, limit):
""" """
Get list information about Ceph OSDs (limited by {limit}) Get list information about Ceph OSDs (limited by {limit})
@ -562,6 +579,7 @@ def ceph_pool_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_pool_add(config, pool, pgs, replcfg): def ceph_pool_add(config, pool, pgs, replcfg):
""" """
Add new Ceph OSD Add new Ceph OSD
@ -584,6 +602,7 @@ def ceph_pool_add(config, pool, pgs, replcfg):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_pool_remove(config, pool): def ceph_pool_remove(config, pool):
""" """
Remove Ceph OSD Remove Ceph OSD
@ -604,13 +623,14 @@ def ceph_pool_remove(config, pool):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def format_list_pool(pool_list): def format_list_pool(pool_list):
# Handle empty list # Handle empty list
if not pool_list: if not pool_list:
pool_list = list() pool_list = list()
# Handle single-entry list # Handle single-entry list
if not isinstance(pool_list, list): if not isinstance(pool_list, list):
pool_list = [ pool_list ] pool_list = [pool_list]
pool_list_output = [] pool_list_output = []
@ -721,38 +741,37 @@ Rd: {pool_read_ops: <{pool_read_ops_length}} \
Wr: {pool_write_ops: <{pool_write_ops_length}} \ Wr: {pool_write_ops: <{pool_write_ops_length}} \
{pool_write_data: <{pool_write_data_length}} \ {pool_write_data: <{pool_write_data_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
pool_id_length=pool_id_length, pool_id_length=pool_id_length,
pool_name_length=pool_name_length, pool_name_length=pool_name_length,
pool_used_length=pool_used_length, pool_used_length=pool_used_length,
pool_usedpct_length=pool_usedpct_length, pool_usedpct_length=pool_usedpct_length,
pool_free_length=pool_free_length, pool_free_length=pool_free_length,
pool_objects_length=pool_num_objects_length, pool_objects_length=pool_num_objects_length,
pool_clones_length=pool_num_clones_length, pool_clones_length=pool_num_clones_length,
pool_copies_length=pool_num_copies_length, pool_copies_length=pool_num_copies_length,
pool_degraded_length=pool_num_degraded_length, pool_degraded_length=pool_num_degraded_length,
pool_write_ops_length=pool_write_ops_length, pool_write_ops_length=pool_write_ops_length,
pool_write_data_length=pool_write_data_length, pool_write_data_length=pool_write_data_length,
pool_read_ops_length=pool_read_ops_length, pool_read_ops_length=pool_read_ops_length,
pool_read_data_length=pool_read_data_length, pool_read_data_length=pool_read_data_length,
pool_id='ID', pool_id='ID',
pool_name='Name', pool_name='Name',
pool_used='Used', pool_used='Used',
pool_usedpct='%', pool_usedpct='%',
pool_free='Free', pool_free='Free',
pool_objects='Count', pool_objects='Count',
pool_clones='Clones', pool_clones='Clones',
pool_copies='Copies', pool_copies='Copies',
pool_degraded='Degraded', pool_degraded='Degraded',
pool_write_ops='OPS', pool_write_ops='OPS',
pool_write_data='Data', pool_write_data='Data',
pool_read_ops='OPS', pool_read_ops='OPS',
pool_read_data='Data' pool_read_data='Data')
)
) )
for pool_information in sorted(pool_list, key = lambda x: int(x['stats']['id'])): for pool_information in sorted(pool_list, key=lambda x: int(x['stats']['id'])):
# Format the output header # Format the output header
pool_list_output.append('{bold}\ pool_list_output.append('{bold}\
{pool_id: <{pool_id_length}} \ {pool_id: <{pool_id_length}} \
@ -769,35 +788,34 @@ Wr: {pool_write_ops: <{pool_write_ops_length}} \
{pool_write_ops: <{pool_write_ops_length}} \ {pool_write_ops: <{pool_write_ops_length}} \
{pool_write_data: <{pool_write_data_length}} \ {pool_write_data: <{pool_write_data_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
pool_id_length=pool_id_length, pool_id_length=pool_id_length,
pool_name_length=pool_name_length, pool_name_length=pool_name_length,
pool_used_length=pool_used_length, pool_used_length=pool_used_length,
pool_usedpct_length=pool_usedpct_length, pool_usedpct_length=pool_usedpct_length,
pool_free_length=pool_free_length, pool_free_length=pool_free_length,
pool_objects_length=pool_num_objects_length, pool_objects_length=pool_num_objects_length,
pool_clones_length=pool_num_clones_length, pool_clones_length=pool_num_clones_length,
pool_copies_length=pool_num_copies_length, pool_copies_length=pool_num_copies_length,
pool_degraded_length=pool_num_degraded_length, pool_degraded_length=pool_num_degraded_length,
pool_write_ops_length=pool_write_ops_length, pool_write_ops_length=pool_write_ops_length,
pool_write_data_length=pool_write_data_length, pool_write_data_length=pool_write_data_length,
pool_read_ops_length=pool_read_ops_length, pool_read_ops_length=pool_read_ops_length,
pool_read_data_length=pool_read_data_length, pool_read_data_length=pool_read_data_length,
pool_id=pool_information['stats']['id'], pool_id=pool_information['stats']['id'],
pool_name=pool_information['name'], pool_name=pool_information['name'],
pool_used=pool_information['stats']['used_bytes'], pool_used=pool_information['stats']['used_bytes'],
pool_usedpct=pool_information['stats']['used_percent'], pool_usedpct=pool_information['stats']['used_percent'],
pool_free=pool_information['stats']['free_bytes'], pool_free=pool_information['stats']['free_bytes'],
pool_objects=pool_information['stats']['num_objects'], pool_objects=pool_information['stats']['num_objects'],
pool_clones=pool_information['stats']['num_object_clones'], pool_clones=pool_information['stats']['num_object_clones'],
pool_copies=pool_information['stats']['num_object_copies'], pool_copies=pool_information['stats']['num_object_copies'],
pool_degraded=pool_information['stats']['num_objects_degraded'], pool_degraded=pool_information['stats']['num_objects_degraded'],
pool_write_ops=pool_information['stats']['write_ops'], pool_write_ops=pool_information['stats']['write_ops'],
pool_write_data=pool_information['stats']['write_bytes'], pool_write_data=pool_information['stats']['write_bytes'],
pool_read_ops=pool_information['stats']['read_ops'], pool_read_ops=pool_information['stats']['read_ops'],
pool_read_data=pool_information['stats']['read_bytes'] pool_read_data=pool_information['stats']['read_bytes'])
)
) )
return '\n'.join(pool_list_output) return '\n'.join(pool_list_output)
@ -821,6 +839,7 @@ def ceph_volume_info(config, pool, volume):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_volume_list(config, limit, pool): def ceph_volume_list(config, limit, pool):
""" """
Get list information about Ceph volumes (limited by {limit} and by {pool}) Get list information about Ceph volumes (limited by {limit} and by {pool})
@ -842,6 +861,7 @@ def ceph_volume_list(config, limit, pool):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_volume_add(config, pool, volume, size): def ceph_volume_add(config, pool, volume, size):
""" """
Add new Ceph volume Add new Ceph volume
@ -864,6 +884,7 @@ def ceph_volume_add(config, pool, volume, size):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_volume_upload(config, pool, volume, image_format, image_file): def ceph_volume_upload(config, pool, volume, image_format, image_file):
""" """
Upload a disk image to a Ceph volume Upload a disk image to a Ceph volume
@ -876,7 +897,7 @@ def ceph_volume_upload(config, pool, volume, image_format, image_file):
bar = UploadProgressBar(image_file, end_message="Parsing file on remote side...", end_nl=False) bar = UploadProgressBar(image_file, end_message="Parsing file on remote side...", end_nl=False)
upload_data = MultipartEncoder( upload_data = MultipartEncoder(
fields={ 'file': ('filename', open(image_file, 'rb'), 'application/octet-stream')} fields={'file': ('filename', open(image_file, 'rb'), 'application/octet-stream')}
) )
upload_monitor = MultipartEncoderMonitor(upload_data, bar.update) upload_monitor = MultipartEncoderMonitor(upload_data, bar.update)
@ -899,6 +920,7 @@ def ceph_volume_upload(config, pool, volume, image_format, image_file):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_volume_remove(config, pool, volume): def ceph_volume_remove(config, pool, volume):
""" """
Remove Ceph volume Remove Ceph volume
@ -916,6 +938,7 @@ def ceph_volume_remove(config, pool, volume):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None): def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None):
""" """
Modify Ceph volume Modify Ceph volume
@ -940,6 +963,7 @@ def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_volume_clone(config, pool, volume, new_volume): def ceph_volume_clone(config, pool, volume, new_volume):
""" """
Clone Ceph volume Clone Ceph volume
@ -960,13 +984,14 @@ def ceph_volume_clone(config, pool, volume, new_volume):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def format_list_volume(volume_list): def format_list_volume(volume_list):
# Handle empty list # Handle empty list
if not volume_list: if not volume_list:
volume_list = list() volume_list = list()
# Handle single-entry list # Handle single-entry list
if not isinstance(volume_list, list): if not isinstance(volume_list, list):
volume_list = [ volume_list ] volume_list = [volume_list]
volume_list_output = [] volume_list_output = []
@ -1024,23 +1049,22 @@ def format_list_volume(volume_list):
{volume_format: <{volume_format_length}} \ {volume_format: <{volume_format_length}} \
{volume_features: <{volume_features_length}} \ {volume_features: <{volume_features_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
volume_name_length=volume_name_length, volume_name_length=volume_name_length,
volume_pool_length=volume_pool_length, volume_pool_length=volume_pool_length,
volume_size_length=volume_size_length, volume_size_length=volume_size_length,
volume_objects_length=volume_objects_length, volume_objects_length=volume_objects_length,
volume_order_length=volume_order_length, volume_order_length=volume_order_length,
volume_format_length=volume_format_length, volume_format_length=volume_format_length,
volume_features_length=volume_features_length, volume_features_length=volume_features_length,
volume_name='Name', volume_name='Name',
volume_pool='Pool', volume_pool='Pool',
volume_size='Size', volume_size='Size',
volume_objects='Objects', volume_objects='Objects',
volume_order='Order', volume_order='Order',
volume_format='Format', volume_format='Format',
volume_features='Features', volume_features='Features')
)
) )
for volume_information in volume_list: for volume_information in volume_list:
@ -1053,23 +1077,22 @@ def format_list_volume(volume_list):
{volume_format: <{volume_format_length}} \ {volume_format: <{volume_format_length}} \
{volume_features: <{volume_features_length}} \ {volume_features: <{volume_features_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
volume_name_length=volume_name_length, volume_name_length=volume_name_length,
volume_pool_length=volume_pool_length, volume_pool_length=volume_pool_length,
volume_size_length=volume_size_length, volume_size_length=volume_size_length,
volume_objects_length=volume_objects_length, volume_objects_length=volume_objects_length,
volume_order_length=volume_order_length, volume_order_length=volume_order_length,
volume_format_length=volume_format_length, volume_format_length=volume_format_length,
volume_features_length=volume_features_length, volume_features_length=volume_features_length,
volume_name=volume_information['name'], volume_name=volume_information['name'],
volume_pool=volume_information['pool'], volume_pool=volume_information['pool'],
volume_size=volume_information['stats']['size'], volume_size=volume_information['stats']['size'],
volume_objects=volume_information['stats']['objects'], volume_objects=volume_information['stats']['objects'],
volume_order=volume_information['stats']['order'], volume_order=volume_information['stats']['order'],
volume_format=volume_information['stats']['format'], volume_format=volume_information['stats']['format'],
volume_features=','.join(volume_information['stats']['features']), volume_features=','.join(volume_information['stats']['features']))
)
) )
return '\n'.join(sorted(volume_list_output)) return '\n'.join(sorted(volume_list_output))
@ -1093,6 +1116,7 @@ def ceph_snapshot_info(config, pool, volume, snapshot):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_snapshot_list(config, limit, volume, pool): def ceph_snapshot_list(config, limit, volume, pool):
""" """
Get list information about Ceph snapshots (limited by {limit}, by {pool}, or by {volume}) Get list information about Ceph snapshots (limited by {limit}, by {pool}, or by {volume})
@ -1116,6 +1140,7 @@ def ceph_snapshot_list(config, limit, volume, pool):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ceph_snapshot_add(config, pool, volume, snapshot): def ceph_snapshot_add(config, pool, volume, snapshot):
""" """
Add new Ceph snapshot Add new Ceph snapshot
@ -1138,6 +1163,7 @@ def ceph_snapshot_add(config, pool, volume, snapshot):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_snapshot_remove(config, pool, volume, snapshot): def ceph_snapshot_remove(config, pool, volume, snapshot):
""" """
Remove Ceph snapshot Remove Ceph snapshot
@ -1155,6 +1181,7 @@ def ceph_snapshot_remove(config, pool, volume, snapshot):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None): def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None):
""" """
Modify Ceph snapshot Modify Ceph snapshot
@ -1177,13 +1204,14 @@ def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def format_list_snapshot(snapshot_list): def format_list_snapshot(snapshot_list):
# Handle empty list # Handle empty list
if not snapshot_list: if not snapshot_list:
snapshot_list = list() snapshot_list = list()
# Handle single-entry list # Handle single-entry list
if not isinstance(snapshot_list, list): if not isinstance(snapshot_list, list):
snapshot_list = [ snapshot_list ] snapshot_list = [snapshot_list]
snapshot_list_output = [] snapshot_list_output = []
@ -1217,15 +1245,14 @@ def format_list_snapshot(snapshot_list):
{snapshot_volume: <{snapshot_volume_length}} \ {snapshot_volume: <{snapshot_volume_length}} \
{snapshot_pool: <{snapshot_pool_length}} \ {snapshot_pool: <{snapshot_pool_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
snapshot_name_length=snapshot_name_length, snapshot_name_length=snapshot_name_length,
snapshot_volume_length=snapshot_volume_length, snapshot_volume_length=snapshot_volume_length,
snapshot_pool_length=snapshot_pool_length, snapshot_pool_length=snapshot_pool_length,
snapshot_name='Name', snapshot_name='Name',
snapshot_volume='Volume', snapshot_volume='Volume',
snapshot_pool='Pool', snapshot_pool='Pool')
)
) )
for snapshot_information in snapshot_list: for snapshot_information in snapshot_list:
@ -1237,19 +1264,19 @@ def format_list_snapshot(snapshot_list):
{snapshot_volume: <{snapshot_volume_length}} \ {snapshot_volume: <{snapshot_volume_length}} \
{snapshot_pool: <{snapshot_pool_length}} \ {snapshot_pool: <{snapshot_pool_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
snapshot_name_length=snapshot_name_length, snapshot_name_length=snapshot_name_length,
snapshot_volume_length=snapshot_volume_length, snapshot_volume_length=snapshot_volume_length,
snapshot_pool_length=snapshot_pool_length, snapshot_pool_length=snapshot_pool_length,
snapshot_name=snapshot_name, snapshot_name=snapshot_name,
snapshot_volume=snapshot_volume, snapshot_volume=snapshot_volume,
snapshot_pool=snapshot_pool, snapshot_pool=snapshot_pool)
)
) )
return '\n'.join(sorted(snapshot_list_output)) return '\n'.join(sorted(snapshot_list_output))
# #
# Benchmark functions # Benchmark functions
# #
@ -1272,9 +1299,10 @@ def ceph_benchmark_run(config, pool):
else: else:
retvalue = False retvalue = False
retdata = response.json().get('message', '') retdata = response.json().get('message', '')
return retvalue, retdata return retvalue, retdata
def ceph_benchmark_list(config, job): def ceph_benchmark_list(config, job):
""" """
View results of one or more previous benchmark runs View results of one or more previous benchmark runs
@ -1301,16 +1329,16 @@ def ceph_benchmark_list(config, job):
return retvalue, retdata return retvalue, retdata
def format_list_benchmark(config, benchmark_information): def format_list_benchmark(config, benchmark_information):
benchmark_list_output = [] benchmark_list_output = []
benchmark_id_length = 3
benchmark_job_length = 20 benchmark_job_length = 20
benchmark_bandwidth_length = dict() benchmark_bandwidth_length = dict()
benchmark_iops_length = dict() benchmark_iops_length = dict()
# For this output, we're only showing the Sequential (seq_read and seq_write) and 4k Random (rand_read_4K and rand_write_4K) results since we're showing them for each test result. # For this output, we're only showing the Sequential (seq_read and seq_write) and 4k Random (rand_read_4K and rand_write_4K) results since we're showing them for each test result.
for test in [ "seq_read", "seq_write", "rand_read_4K", "rand_write_4K" ]: for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]:
benchmark_bandwidth_length[test] = 7 benchmark_bandwidth_length[test] = 7
benchmark_iops_length[test] = 6 benchmark_iops_length[test] = 6
@ -1326,7 +1354,7 @@ def format_list_benchmark(config, benchmark_information):
benchmark_bandwidth = dict() benchmark_bandwidth = dict()
benchmark_iops = dict() benchmark_iops = dict()
for test in [ "seq_read", "seq_write", "rand_read_4K", "rand_write_4K" ]: for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]:
benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024) benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024)
benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops'])) benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops']))
@ -1344,15 +1372,14 @@ def format_list_benchmark(config, benchmark_information):
{seq_header: <{seq_header_length}} \ {seq_header: <{seq_header_length}} \
{rand_header: <{rand_header_length}} \ {rand_header: <{rand_header_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
benchmark_job_length=benchmark_job_length, benchmark_job_length=benchmark_job_length,
seq_header_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'] + 3, seq_header_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'] + 3,
rand_header_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'] + 2, rand_header_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'] + 2,
benchmark_job='Benchmark Job', benchmark_job='Benchmark Job',
seq_header='Sequential (4M blocks):', seq_header='Sequential (4M blocks):',
rand_header='Random (4K blocks):' rand_header='Random (4K blocks):')
)
) )
benchmark_list_output.append('{bold}\ benchmark_list_output.append('{bold}\
@ -1362,19 +1389,18 @@ def format_list_benchmark(config, benchmark_information):
{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \ {rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \
{rand_benchmark_iops: <{rand_benchmark_iops_length}} \ {rand_benchmark_iops: <{rand_benchmark_iops_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
benchmark_job_length=benchmark_job_length, benchmark_job_length=benchmark_job_length,
seq_benchmark_bandwidth_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + 2, seq_benchmark_bandwidth_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + 2,
seq_benchmark_iops_length=benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'], seq_benchmark_iops_length=benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'],
rand_benchmark_bandwidth_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + 1, rand_benchmark_bandwidth_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + 1,
rand_benchmark_iops_length=benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'], rand_benchmark_iops_length=benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'],
benchmark_job='', benchmark_job='',
seq_benchmark_bandwidth='R/W Bandwith/s', seq_benchmark_bandwidth='R/W Bandwith/s',
seq_benchmark_iops='R/W IOPS', seq_benchmark_iops='R/W IOPS',
rand_benchmark_bandwidth='R/W Bandwith/s', rand_benchmark_bandwidth='R/W Bandwith/s',
rand_benchmark_iops='R/W IOPS' rand_benchmark_iops='R/W IOPS')
)
) )
for benchmark in benchmark_information: for benchmark in benchmark_information:
@ -1388,17 +1414,16 @@ def format_list_benchmark(config, benchmark_information):
else: else:
benchmark_bandwidth = dict() benchmark_bandwidth = dict()
benchmark_iops = dict() benchmark_iops = dict()
for test in [ "seq_read", "seq_write", "rand_read_4K", "rand_write_4K" ]: for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]:
benchmark_data = json.loads(benchmark['benchmark_result']) benchmark_data = json.loads(benchmark['benchmark_result'])
benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024) benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024)
benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops'])) benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops']))
seq_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['seq_read'], benchmark_bandwidth['seq_write']) seq_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['seq_read'], benchmark_bandwidth['seq_write'])
seq_benchmark_iops = "{} / {}".format(benchmark_iops['seq_read'], benchmark_iops['seq_write']) seq_benchmark_iops = "{} / {}".format(benchmark_iops['seq_read'], benchmark_iops['seq_write'])
rand_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['rand_read_4K'], benchmark_bandwidth['rand_write_4K']) rand_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['rand_read_4K'], benchmark_bandwidth['rand_write_4K'])
rand_benchmark_iops = "{} / {}".format(benchmark_iops['rand_read_4K'], benchmark_iops['rand_write_4K']) rand_benchmark_iops = "{} / {}".format(benchmark_iops['rand_read_4K'], benchmark_iops['rand_write_4K'])
benchmark_list_output.append('{bold}\ benchmark_list_output.append('{bold}\
{benchmark_job: <{benchmark_job_length}} \ {benchmark_job: <{benchmark_job_length}} \
{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \ {seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \
@ -1406,28 +1431,24 @@ def format_list_benchmark(config, benchmark_information):
{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \ {rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \
{rand_benchmark_iops: <{rand_benchmark_iops_length}} \ {rand_benchmark_iops: <{rand_benchmark_iops_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
benchmark_job_length=benchmark_job_length, benchmark_job_length=benchmark_job_length,
seq_benchmark_bandwidth_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + 2, seq_benchmark_bandwidth_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + 2,
seq_benchmark_iops_length=benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'], seq_benchmark_iops_length=benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'],
rand_benchmark_bandwidth_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + 1, rand_benchmark_bandwidth_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + 1,
rand_benchmark_iops_length=benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'], rand_benchmark_iops_length=benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'],
benchmark_job=benchmark_job, benchmark_job=benchmark_job,
seq_benchmark_bandwidth=seq_benchmark_bandwidth, seq_benchmark_bandwidth=seq_benchmark_bandwidth,
seq_benchmark_iops=seq_benchmark_iops, seq_benchmark_iops=seq_benchmark_iops,
rand_benchmark_bandwidth=rand_benchmark_bandwidth, rand_benchmark_bandwidth=rand_benchmark_bandwidth,
rand_benchmark_iops=rand_benchmark_iops rand_benchmark_iops=rand_benchmark_iops)
)
) )
return '\n'.join(benchmark_list_output) return '\n'.join(benchmark_list_output)
def format_info_benchmark(config, benchmark_information):
# Load information from benchmark output
benchmark_id = benchmark_information[0]['id']
benchmark_job = benchmark_information[0]['job']
def format_info_benchmark(config, benchmark_information):
if benchmark_information[0]['benchmark_result'] == "Running": if benchmark_information[0]['benchmark_result'] == "Running":
return "Benchmark test is still running." return "Benchmark test is still running."
@ -1471,7 +1492,7 @@ def format_info_benchmark(config, benchmark_information):
for element in benchmark_details[test]['bandwidth']: for element in benchmark_details[test]['bandwidth']:
try: try:
_element_length = len(format_bytes_tohuman(int(float(benchmark_details[test]['bandwidth'][element])))) _element_length = len(format_bytes_tohuman(int(float(benchmark_details[test]['bandwidth'][element]))))
except: except Exception:
_element_length = len(benchmark_details[test]['bandwidth'][element]) _element_length = len(benchmark_details[test]['bandwidth'][element])
if _element_length > bandwidth_column_length: if _element_length > bandwidth_column_length:
bandwidth_column_length = _element_length bandwidth_column_length = _element_length
@ -1479,7 +1500,7 @@ def format_info_benchmark(config, benchmark_information):
for element in benchmark_details[test]['iops']: for element in benchmark_details[test]['iops']:
try: try:
_element_length = len(format_ops_tohuman(int(float(benchmark_details[test]['iops'][element])))) _element_length = len(format_ops_tohuman(int(float(benchmark_details[test]['iops'][element]))))
except: except Exception:
_element_length = len(benchmark_details[test]['iops'][element]) _element_length = len(benchmark_details[test]['iops'][element])
if _element_length > iops_column_length: if _element_length > iops_column_length:
iops_column_length = _element_length iops_column_length = _element_length
@ -1494,8 +1515,6 @@ def format_info_benchmark(config, benchmark_information):
if _element_length > cpuutil_column_length: if _element_length > cpuutil_column_length:
cpuutil_column_length = _element_length cpuutil_column_length = _element_length
for test in benchmark_details: for test in benchmark_details:
ainformation.append('') ainformation.append('')

View File

@ -25,6 +25,7 @@ import json
import cli_lib.ansiprint as ansiprint import cli_lib.ansiprint as ansiprint
from cli_lib.common import call_api from cli_lib.common import call_api
def initialize(config): def initialize(config):
""" """
Initialize the PVC cluster Initialize the PVC cluster
@ -42,6 +43,7 @@ def initialize(config):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def maintenance_mode(config, state): def maintenance_mode(config, state):
""" """
Enable or disable PVC cluster maintenance mode Enable or disable PVC cluster maintenance mode
@ -62,6 +64,7 @@ def maintenance_mode(config, state):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def get_info(config): def get_info(config):
""" """
Get status of the PVC cluster Get status of the PVC cluster
@ -77,6 +80,7 @@ def get_info(config):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def format_info(cluster_information, oformat): def format_info(cluster_information, oformat):
if oformat == 'json': if oformat == 'json':
return json.dumps(cluster_information) return json.dumps(cluster_information)
@ -105,15 +109,11 @@ def format_info(cluster_information, oformat):
ainformation.append('{}Cluster health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), health_colour, cluster_information['health'], ansiprint.end())) ainformation.append('{}Cluster health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), health_colour, cluster_information['health'], ansiprint.end()))
if cluster_information['health_msg']: if cluster_information['health_msg']:
for line in cluster_information['health_msg']: for line in cluster_information['health_msg']:
ainformation.append( ainformation.append(' > {}'.format(line))
' > {}'.format(line)
)
ainformation.append('{}Storage health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), storage_health_colour, cluster_information['storage_health'], ansiprint.end())) ainformation.append('{}Storage health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), storage_health_colour, cluster_information['storage_health'], ansiprint.end()))
if cluster_information['storage_health_msg']: if cluster_information['storage_health_msg']:
for line in cluster_information['storage_health_msg']: for line in cluster_information['storage_health_msg']:
ainformation.append( ainformation.append(' > {}'.format(line))
' > {}'.format(line)
)
ainformation.append('') ainformation.append('')
ainformation.append('{}Primary node:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['primary_node'])) ainformation.append('{}Primary node:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['primary_node']))
ainformation.append('{}Cluster upstream IP:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['upstream_ip'])) ainformation.append('{}Cluster upstream IP:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['upstream_ip']))

View File

@ -21,21 +21,21 @@
############################################################################### ###############################################################################
import os import os
import io
import math import math
import time import time
import requests import requests
import click import click
from urllib3 import disable_warnings from urllib3 import disable_warnings
def format_bytes(size_bytes): def format_bytes(size_bytes):
byte_unit_matrix = { byte_unit_matrix = {
'B': 1, 'B': 1,
'K': 1024, 'K': 1024,
'M': 1024*1024, 'M': 1024 * 1024,
'G': 1024*1024*1024, 'G': 1024 * 1024 * 1024,
'T': 1024*1024*1024*1024, 'T': 1024 * 1024 * 1024 * 1024,
'P': 1024*1024*1024*1024*1024 'P': 1024 * 1024 * 1024 * 1024 * 1024
} }
human_bytes = '0B' human_bytes = '0B'
for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get): for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get):
@ -45,14 +45,15 @@ def format_bytes(size_bytes):
break break
return human_bytes return human_bytes
def format_metric(integer): def format_metric(integer):
integer_unit_matrix = { integer_unit_matrix = {
'': 1, '': 1,
'K': 1000, 'K': 1000,
'M': 1000*1000, 'M': 1000 * 1000,
'B': 1000*1000*1000, 'B': 1000 * 1000 * 1000,
'T': 1000*1000*1000*1000, 'T': 1000 * 1000 * 1000 * 1000,
'Q': 1000*1000*1000*1000*1000 'Q': 1000 * 1000 * 1000 * 1000 * 1000
} }
human_integer = '0' human_integer = '0'
for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get): for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get):
@ -62,6 +63,7 @@ def format_metric(integer):
break break
return human_integer return human_integer
class UploadProgressBar(object): class UploadProgressBar(object):
def __init__(self, filename, end_message='', end_nl=True): def __init__(self, filename, end_message='', end_nl=True):
file_size = os.path.getsize(filename) file_size = os.path.getsize(filename)
@ -104,6 +106,7 @@ class UploadProgressBar(object):
if self.end_message: if self.end_message:
click.echo(self.end_message + self.end_suffix, nl=self.end_nl) click.echo(self.end_message + self.end_suffix, nl=self.end_nl)
class ErrorResponse(requests.Response): class ErrorResponse(requests.Response):
def __init__(self, json_data, status_code): def __init__(self, json_data, status_code):
self.json_data = json_data self.json_data = json_data
@ -112,6 +115,7 @@ class ErrorResponse(requests.Response):
def json(self): def json(self):
return self.json_data return self.json_data
def call_api(config, operation, request_uri, headers={}, params=None, data=None, files=None): def call_api(config, operation, request_uri, headers={}, params=None, data=None, files=None):
# Craft the URI # Craft the URI
uri = '{}://{}{}{}'.format( uri = '{}://{}{}{}'.format(
@ -172,7 +176,7 @@ def call_api(config, operation, request_uri, headers={}, params=None, data=None,
) )
except Exception as e: except Exception as e:
message = 'Failed to connect to the API: {}'.format(e) message = 'Failed to connect to the API: {}'.format(e)
response = ErrorResponse({'message':message}, 500) response = ErrorResponse({'message': message}, 500)
# Display debug output # Display debug output
if config['debug']: if config['debug']:
@ -183,4 +187,3 @@ def call_api(config, operation, request_uri, headers={}, params=None, data=None,
# Return the response object # Return the response object
return response return response

View File

@ -20,38 +20,39 @@
# #
############################################################################### ###############################################################################
import difflib import re
import colorama
import cli_lib.ansiprint as ansiprint import cli_lib.ansiprint as ansiprint
from cli_lib.common import call_api from cli_lib.common import call_api
def isValidMAC(macaddr): def isValidMAC(macaddr):
allowed = re.compile(r""" allowed = re.compile(r"""
( (
^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$ ^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$
) )
""", """,
re.VERBOSE|re.IGNORECASE) re.VERBOSE | re.IGNORECASE)
if allowed.match(macaddr): if allowed.match(macaddr):
return True return True
else: else:
return False return False
def isValidIP(ipaddr): def isValidIP(ipaddr):
ip4_blocks = str(ipaddr).split(".") ip4_blocks = str(ipaddr).split(".")
if len(ip4_blocks) == 4: if len(ip4_blocks) == 4:
for block in ip4_blocks: for block in ip4_blocks:
# Check if number is digit, if not checked before calling this function # Check if number is digit, if not checked before calling this function
if not block.isdigit(): if not block.isdigit():
return False return False
tmp = int(block) tmp = int(block)
if 0 > tmp > 255: if 0 > tmp > 255:
return False return False
return True return True
return False return False
# #
# Primary functions # Primary functions
# #
@ -70,6 +71,7 @@ def net_info(config, net):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def net_list(config, limit): def net_list(config, limit):
""" """
Get list information about networks (limited by {limit}) Get list information about networks (limited by {limit})
@ -89,10 +91,11 @@ def net_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def net_add(config, vni, description, nettype, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end): def net_add(config, vni, description, nettype, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end):
""" """
Add new network Add new network
API endpoint: POST /api/v1/network API endpoint: POST /api/v1/network
API arguments: lots API arguments: lots
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -120,10 +123,11 @@ def net_add(config, vni, description, nettype, domain, name_servers, ip4_network
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end): def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end):
""" """
Modify a network Modify a network
API endpoint: POST /api/v1/network/{net} API endpoint: POST /api/v1/network/{net}
API arguments: lots API arguments: lots
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -159,10 +163,11 @@ def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def net_remove(config, net): def net_remove(config, net):
""" """
Remove a network Remove a network
API endpoint: DELETE /api/v1/network/{net} API endpoint: DELETE /api/v1/network/{net}
API arguments: API arguments:
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -176,6 +181,7 @@ def net_remove(config, net):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
# #
# DHCP lease functions # DHCP lease functions
# #
@ -194,6 +200,7 @@ def net_dhcp_info(config, net, mac):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def net_dhcp_list(config, net, limit, only_static=False): def net_dhcp_list(config, net, limit, only_static=False):
""" """
Get list information about leases (limited by {limit}) Get list information about leases (limited by {limit})
@ -218,10 +225,11 @@ def net_dhcp_list(config, net, limit, only_static=False):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def net_dhcp_add(config, net, ipaddr, macaddr, hostname): def net_dhcp_add(config, net, ipaddr, macaddr, hostname):
""" """
Add new network DHCP lease Add new network DHCP lease
API endpoint: POST /api/v1/network/{net}/lease API endpoint: POST /api/v1/network/{net}/lease
API arguments: macaddress=macaddr, ipaddress=ipaddr, hostname=hostname API arguments: macaddress=macaddr, ipaddress=ipaddr, hostname=hostname
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -240,10 +248,11 @@ def net_dhcp_add(config, net, ipaddr, macaddr, hostname):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def net_dhcp_remove(config, net, mac): def net_dhcp_remove(config, net, mac):
""" """
Remove a network DHCP lease Remove a network DHCP lease
API endpoint: DELETE /api/v1/network/{vni}/lease/{mac} API endpoint: DELETE /api/v1/network/{vni}/lease/{mac}
API arguments: API arguments:
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -257,6 +266,7 @@ def net_dhcp_remove(config, net, mac):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
# #
# ACL functions # ACL functions
# #
@ -275,6 +285,7 @@ def net_acl_info(config, net, description):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def net_acl_list(config, net, limit, direction): def net_acl_list(config, net, limit, direction):
""" """
Get list information about ACLs (limited by {limit}) Get list information about ACLs (limited by {limit})
@ -296,10 +307,11 @@ def net_acl_list(config, net, limit, direction):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def net_acl_add(config, net, direction, description, rule, order): def net_acl_add(config, net, direction, description, rule, order):
""" """
Add new network acl Add new network acl
API endpoint: POST /api/v1/network/{net}/acl API endpoint: POST /api/v1/network/{net}/acl
API arguments: description=description, direction=direction, order=order, rule=rule API arguments: description=description, direction=direction, order=order, rule=rule
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -320,10 +332,12 @@ def net_acl_add(config, net, direction, description, rule, order):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def net_acl_remove(config, net, description): def net_acl_remove(config, net, description):
""" """
Remove a network ACL Remove a network ACL
API endpoint: DELETE /api/v1/network/{vni}/acl/{description} API endpoint: DELETE /api/v1/network/{vni}/acl/{description}
API arguments: API arguments:
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
@ -362,6 +376,7 @@ def getOutputColours(network_information):
return v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour return v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour
def format_info(config, network_information, long_output): def format_info(config, network_information, long_output):
if not network_information: if not network_information:
return "No network found" return "No network found"
@ -420,13 +435,14 @@ def format_info(config, network_information, long_output):
# Join it all together # Join it all together
return '\n'.join(ainformation) return '\n'.join(ainformation)
def format_list(config, network_list): def format_list(config, network_list):
if not network_list: if not network_list:
return "No network found" return "No network found"
# Handle single-element lists # Handle single-element lists
if not isinstance(network_list, list): if not isinstance(network_list, list):
network_list = [ network_list ] network_list = [network_list]
network_list_output = [] network_list_output = []
@ -464,25 +480,24 @@ def format_list(config, network_list):
{net_v4_flag: <{net_v4_flag_length}} \ {net_v4_flag: <{net_v4_flag_length}} \
{net_dhcp4_flag: <{net_dhcp4_flag_length}} \ {net_dhcp4_flag: <{net_dhcp4_flag_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
net_vni_length=net_vni_length, net_vni_length=net_vni_length,
net_description_length=net_description_length, net_description_length=net_description_length,
net_nettype_length=net_nettype_length, net_nettype_length=net_nettype_length,
net_domain_length=net_domain_length, net_domain_length=net_domain_length,
net_v6_flag_length=net_v6_flag_length, net_v6_flag_length=net_v6_flag_length,
net_dhcp6_flag_length=net_dhcp6_flag_length, net_dhcp6_flag_length=net_dhcp6_flag_length,
net_v4_flag_length=net_v4_flag_length, net_v4_flag_length=net_v4_flag_length,
net_dhcp4_flag_length=net_dhcp4_flag_length, net_dhcp4_flag_length=net_dhcp4_flag_length,
net_vni='VNI', net_vni='VNI',
net_description='Description', net_description='Description',
net_nettype='Type', net_nettype='Type',
net_domain='Domain', net_domain='Domain',
net_v6_flag='IPv6', net_v6_flag='IPv6',
net_dhcp6_flag='DHCPv6', net_dhcp6_flag='DHCPv6',
net_v4_flag='IPv4', net_v4_flag='IPv4',
net_dhcp4_flag='DHCPv4', net_dhcp4_flag='DHCPv4')
)
) )
for network_information in network_list: for network_information in network_list:
@ -497,13 +512,7 @@ def format_list(config, network_list):
else: else:
v6_flag = 'False' v6_flag = 'False'
if network_information['ip4']['dhcp_flag'] == "True": network_list_output.append('{bold}\
dhcp4_range = '{} - {}'.format(network_information['ip4']['dhcp_start'], network_information['ip4']['dhcp_end'])
else:
dhcp4_range = 'N/A'
network_list_output.append(
'{bold}\
{net_vni: <{net_vni_length}} \ {net_vni: <{net_vni_length}} \
{net_description: <{net_description_length}} \ {net_description: <{net_description_length}} \
{net_nettype: <{net_nettype_length}} \ {net_nettype: <{net_nettype_length}} \
@ -513,34 +522,34 @@ def format_list(config, network_list):
{v4_flag_colour}{net_v4_flag: <{net_v4_flag_length}}{colour_off} \ {v4_flag_colour}{net_v4_flag: <{net_v4_flag_length}}{colour_off} \
{dhcp4_flag_colour}{net_dhcp4_flag: <{net_dhcp4_flag_length}}{colour_off} \ {dhcp4_flag_colour}{net_dhcp4_flag: <{net_dhcp4_flag_length}}{colour_off} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
net_vni_length=net_vni_length, net_vni_length=net_vni_length,
net_description_length=net_description_length, net_description_length=net_description_length,
net_nettype_length=net_nettype_length, net_nettype_length=net_nettype_length,
net_domain_length=net_domain_length, net_domain_length=net_domain_length,
net_v6_flag_length=net_v6_flag_length, net_v6_flag_length=net_v6_flag_length,
net_dhcp6_flag_length=net_dhcp6_flag_length, net_dhcp6_flag_length=net_dhcp6_flag_length,
net_v4_flag_length=net_v4_flag_length, net_v4_flag_length=net_v4_flag_length,
net_dhcp4_flag_length=net_dhcp4_flag_length, net_dhcp4_flag_length=net_dhcp4_flag_length,
net_vni=network_information['vni'], net_vni=network_information['vni'],
net_description=network_information['description'], net_description=network_information['description'],
net_nettype=network_information['type'], net_nettype=network_information['type'],
net_domain=network_information['domain'], net_domain=network_information['domain'],
net_v6_flag=v6_flag, net_v6_flag=v6_flag,
v6_flag_colour=v6_flag_colour, v6_flag_colour=v6_flag_colour,
net_dhcp6_flag=network_information['ip6']['dhcp_flag'], net_dhcp6_flag=network_information['ip6']['dhcp_flag'],
dhcp6_flag_colour=dhcp6_flag_colour, dhcp6_flag_colour=dhcp6_flag_colour,
net_v4_flag=v4_flag, net_v4_flag=v4_flag,
v4_flag_colour=v4_flag_colour, v4_flag_colour=v4_flag_colour,
net_dhcp4_flag=network_information['ip4']['dhcp_flag'], net_dhcp4_flag=network_information['ip4']['dhcp_flag'],
dhcp4_flag_colour=dhcp4_flag_colour, dhcp4_flag_colour=dhcp4_flag_colour,
colour_off=ansiprint.end() colour_off=ansiprint.end())
)
) )
return '\n'.join(sorted(network_list_output)) return '\n'.join(sorted(network_list_output))
def format_list_dhcp(dhcp_lease_list): def format_list_dhcp(dhcp_lease_list):
dhcp_lease_list_output = [] dhcp_lease_list_output = []
@ -570,17 +579,16 @@ def format_list_dhcp(dhcp_lease_list):
{lease_mac_address: <{lease_mac_address_length}} \ {lease_mac_address: <{lease_mac_address_length}} \
{lease_timestamp: <{lease_timestamp_length}} \ {lease_timestamp: <{lease_timestamp_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
lease_hostname_length=lease_hostname_length, lease_hostname_length=lease_hostname_length,
lease_ip4_address_length=lease_ip4_address_length, lease_ip4_address_length=lease_ip4_address_length,
lease_mac_address_length=lease_mac_address_length, lease_mac_address_length=lease_mac_address_length,
lease_timestamp_length=lease_timestamp_length, lease_timestamp_length=lease_timestamp_length,
lease_hostname='Hostname', lease_hostname='Hostname',
lease_ip4_address='IP Address', lease_ip4_address='IP Address',
lease_mac_address='MAC Address', lease_mac_address='MAC Address',
lease_timestamp='Timestamp' lease_timestamp='Timestamp')
)
) )
for dhcp_lease_information in dhcp_lease_list: for dhcp_lease_information in dhcp_lease_list:
@ -590,28 +598,28 @@ def format_list_dhcp(dhcp_lease_list):
{lease_mac_address: <{lease_mac_address_length}} \ {lease_mac_address: <{lease_mac_address_length}} \
{lease_timestamp: <{lease_timestamp_length}} \ {lease_timestamp: <{lease_timestamp_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
lease_hostname_length=lease_hostname_length, lease_hostname_length=lease_hostname_length,
lease_ip4_address_length=lease_ip4_address_length, lease_ip4_address_length=lease_ip4_address_length,
lease_mac_address_length=lease_mac_address_length, lease_mac_address_length=lease_mac_address_length,
lease_timestamp_length=12, lease_timestamp_length=12,
lease_hostname=str(dhcp_lease_information['hostname']), lease_hostname=str(dhcp_lease_information['hostname']),
lease_ip4_address=str(dhcp_lease_information['ip4_address']), lease_ip4_address=str(dhcp_lease_information['ip4_address']),
lease_mac_address=str(dhcp_lease_information['mac_address']), lease_mac_address=str(dhcp_lease_information['mac_address']),
lease_timestamp=str(dhcp_lease_information['timestamp']) lease_timestamp=str(dhcp_lease_information['timestamp']))
)
) )
return '\n'.join(sorted(dhcp_lease_list_output)) return '\n'.join(sorted(dhcp_lease_list_output))
def format_list_acl(acl_list): def format_list_acl(acl_list):
# Handle when we get an empty entry # Handle when we get an empty entry
if not acl_list: if not acl_list:
acl_list = list() acl_list = list()
# Handle when we get a single entry # Handle when we get a single entry
if isinstance(acl_list, dict): if isinstance(acl_list, dict):
acl_list = [ acl_list ] acl_list = [acl_list]
acl_list_output = [] acl_list_output = []
@ -641,17 +649,16 @@ def format_list_acl(acl_list):
{acl_description: <{acl_description_length}} \ {acl_description: <{acl_description_length}} \
{acl_rule: <{acl_rule_length}} \ {acl_rule: <{acl_rule_length}} \
{end_bold}'.format( {end_bold}'.format(
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
acl_direction_length=acl_direction_length, acl_direction_length=acl_direction_length,
acl_order_length=acl_order_length, acl_order_length=acl_order_length,
acl_description_length=acl_description_length, acl_description_length=acl_description_length,
acl_rule_length=acl_rule_length, acl_rule_length=acl_rule_length,
acl_direction='Direction', acl_direction='Direction',
acl_order='Order', acl_order='Order',
acl_description='Description', acl_description='Description',
acl_rule='Rule', acl_rule='Rule')
)
) )
for acl_information in acl_list: for acl_information in acl_list:
@ -661,17 +668,16 @@ def format_list_acl(acl_list):
{acl_description: <{acl_description_length}} \ {acl_description: <{acl_description_length}} \
{acl_rule: <{acl_rule_length}} \ {acl_rule: <{acl_rule_length}} \
{end_bold}'.format( {end_bold}'.format(
bold='', bold='',
end_bold='', end_bold='',
acl_direction_length=acl_direction_length, acl_direction_length=acl_direction_length,
acl_order_length=acl_order_length, acl_order_length=acl_order_length,
acl_description_length=acl_description_length, acl_description_length=acl_description_length,
acl_rule_length=acl_rule_length, acl_rule_length=acl_rule_length,
acl_direction=acl_information['direction'], acl_direction=acl_information['direction'],
acl_order=acl_information['order'], acl_order=acl_information['order'],
acl_description=acl_information['description'], acl_description=acl_information['description'],
acl_rule=acl_information['rule'], acl_rule=acl_information['rule'])
)
) )
return '\n'.join(sorted(acl_list_output)) return '\n'.join(sorted(acl_list_output))

View File

@ -23,6 +23,7 @@
import cli_lib.ansiprint as ansiprint import cli_lib.ansiprint as ansiprint
from cli_lib.common import call_api from cli_lib.common import call_api
# #
# Primary functions # Primary functions
# #
@ -34,7 +35,7 @@ def node_coordinator_state(config, node, action):
API arguments: action={action} API arguments: action={action}
API schema: {"message": "{data}"} API schema: {"message": "{data}"}
""" """
params={ params = {
'state': action 'state': action
} }
response = call_api(config, 'post', '/node/{node}/coordinator-state'.format(node=node), params=params) response = call_api(config, 'post', '/node/{node}/coordinator-state'.format(node=node), params=params)
@ -46,6 +47,7 @@ def node_coordinator_state(config, node, action):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def node_domain_state(config, node, action, wait): def node_domain_state(config, node, action, wait):
""" """
Set node domain state state (flush/ready) Set node domain state state (flush/ready)
@ -54,7 +56,7 @@ def node_domain_state(config, node, action, wait):
API arguments: action={action}, wait={wait} API arguments: action={action}, wait={wait}
API schema: {"message": "{data}"} API schema: {"message": "{data}"}
""" """
params={ params = {
'state': action, 'state': action,
'wait': str(wait).lower() 'wait': str(wait).lower()
} }
@ -67,6 +69,7 @@ def node_domain_state(config, node, action, wait):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def node_info(config, node): def node_info(config, node):
""" """
Get information about node Get information about node
@ -82,6 +85,7 @@ def node_info(config, node):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def node_list(config, limit, target_daemon_state, target_coordinator_state, target_domain_state): def node_list(config, limit, target_daemon_state, target_coordinator_state, target_domain_state):
""" """
Get list information about nodes (limited by {limit}) Get list information about nodes (limited by {limit})
@ -107,6 +111,7 @@ def node_list(config, limit, target_daemon_state, target_coordinator_state, targ
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
# #
# Output display functions # Output display functions
# #
@ -148,6 +153,7 @@ def getOutputColours(node_information):
return daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour return daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour
def format_info(node_information, long_output): def format_info(node_information, long_output):
daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information) daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)
@ -178,10 +184,11 @@ def format_info(node_information, long_output):
ainformation.append('') ainformation.append('')
return '\n'.join(ainformation) return '\n'.join(ainformation)
def format_list(node_list, raw): def format_list(node_list, raw):
# Handle single-element lists # Handle single-element lists
if not isinstance(node_list, list): if not isinstance(node_list, list):
node_list = [ node_list ] node_list = [node_list]
if raw: if raw:
ainformation = list() ainformation = list()
@ -293,7 +300,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
node_mem_provisioned='Prov' node_mem_provisioned='Prov'
) )
) )
# Format the string (elements) # Format the string (elements)
for node_information in node_list: for node_information in node_list:
daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information) daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)

View File

@ -20,9 +20,6 @@
# #
############################################################################### ###############################################################################
import time
import re
import subprocess
import ast import ast
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
@ -30,6 +27,7 @@ from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncod
import cli_lib.ansiprint as ansiprint import cli_lib.ansiprint as ansiprint
from cli_lib.common import UploadProgressBar, call_api from cli_lib.common import UploadProgressBar, call_api
# #
# Primary functions # Primary functions
# #
@ -48,6 +46,7 @@ def template_info(config, template, template_type):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def template_list(config, limit, template_type=None): def template_list(config, limit, template_type=None):
""" """
Get list information about templates (limited by {limit}) Get list information about templates (limited by {limit})
@ -70,6 +69,7 @@ def template_list(config, limit, template_type=None):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def template_add(config, params, template_type=None): def template_add(config, params, template_type=None):
""" """
Add a new template of {template_type} with {params} Add a new template of {template_type} with {params}
@ -84,9 +84,10 @@ def template_add(config, params, template_type=None):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def template_modify(config, params, name, template_type): def template_modify(config, params, name, template_type):
""" """
Modify an existing template of {template_type} with {params} Modify an existing template of {template_type} with {params}
@ -101,9 +102,10 @@ def template_modify(config, params, name, template_type):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def template_remove(config, name, template_type): def template_remove(config, name, template_type):
""" """
Remove template {name} of {template_type} Remove template {name} of {template_type}
@ -118,9 +120,10 @@ def template_remove(config, name, template_type):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def template_element_add(config, name, element_id, params, element_type=None, template_type=None): def template_element_add(config, name, element_id, params, element_type=None, template_type=None):
""" """
Add a new template element of {element_type} with {params} to template {name} of {template_type} Add a new template element of {element_type} with {params} to template {name} of {template_type}
@ -135,9 +138,10 @@ def template_element_add(config, name, element_id, params, element_type=None, te
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def template_element_remove(config, name, element_id, element_type=None, template_type=None): def template_element_remove(config, name, element_id, element_type=None, template_type=None):
""" """
Remove template element {element_id} of {element_type} from template {name} of {template_type} Remove template element {element_id} of {element_type} from template {name} of {template_type}
@ -152,9 +156,10 @@ def template_element_remove(config, name, element_id, element_type=None, templat
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def userdata_info(config, userdata): def userdata_info(config, userdata):
""" """
Get information about userdata Get information about userdata
@ -170,6 +175,7 @@ def userdata_info(config, userdata):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def userdata_list(config, limit): def userdata_list(config, limit):
""" """
Get list information about userdatas (limited by {limit}) Get list information about userdatas (limited by {limit})
@ -189,6 +195,7 @@ def userdata_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def userdata_show(config, name): def userdata_show(config, name):
""" """
Get information about userdata name Get information about userdata name
@ -204,6 +211,7 @@ def userdata_show(config, name):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def userdata_add(config, params): def userdata_add(config, params):
""" """
Add a new userdata with {params} Add a new userdata with {params}
@ -227,9 +235,10 @@ def userdata_add(config, params):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def userdata_modify(config, name, params): def userdata_modify(config, name, params):
""" """
Modify userdata {name} with {params} Modify userdata {name} with {params}
@ -252,9 +261,10 @@ def userdata_modify(config, name, params):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def userdata_remove(config, name): def userdata_remove(config, name):
""" """
Remove userdata {name} Remove userdata {name}
@ -269,9 +279,10 @@ def userdata_remove(config, name):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def script_info(config, script): def script_info(config, script):
""" """
Get information about script Get information about script
@ -287,6 +298,7 @@ def script_info(config, script):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def script_list(config, limit): def script_list(config, limit):
""" """
Get list information about scripts (limited by {limit}) Get list information about scripts (limited by {limit})
@ -306,6 +318,7 @@ def script_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def script_show(config, name): def script_show(config, name):
""" """
Get information about script name Get information about script name
@ -321,6 +334,7 @@ def script_show(config, name):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def script_add(config, params): def script_add(config, params):
""" """
Add a new script with {params} Add a new script with {params}
@ -344,9 +358,10 @@ def script_add(config, params):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def script_modify(config, name, params): def script_modify(config, name, params):
""" """
Modify script {name} with {params} Modify script {name} with {params}
@ -369,9 +384,10 @@ def script_modify(config, name, params):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def script_remove(config, name): def script_remove(config, name):
""" """
Remove script {name} Remove script {name}
@ -386,9 +402,10 @@ def script_remove(config, name):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def ova_info(config, name): def ova_info(config, name):
""" """
Get information about OVA image {name} Get information about OVA image {name}
@ -404,6 +421,7 @@ def ova_info(config, name):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ova_list(config, limit): def ova_list(config, limit):
""" """
Get list information about OVA images (limited by {limit}) Get list information about OVA images (limited by {limit})
@ -423,6 +441,7 @@ def ova_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def ova_upload(config, name, ova_file, params): def ova_upload(config, name, ova_file, params):
""" """
Upload an OVA image to the cluster Upload an OVA image to the cluster
@ -435,7 +454,7 @@ def ova_upload(config, name, ova_file, params):
bar = UploadProgressBar(ova_file, end_message="Parsing file on remote side...", end_nl=False) bar = UploadProgressBar(ova_file, end_message="Parsing file on remote side...", end_nl=False)
upload_data = MultipartEncoder( upload_data = MultipartEncoder(
fields={ 'file': ('filename', open(ova_file, 'rb'), 'application/octet-stream')} fields={'file': ('filename', open(ova_file, 'rb'), 'application/octet-stream')}
) )
upload_monitor = MultipartEncoderMonitor(upload_data, bar.update) upload_monitor = MultipartEncoderMonitor(upload_data, bar.update)
@ -455,6 +474,7 @@ def ova_upload(config, name, ova_file, params):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def ova_remove(config, name): def ova_remove(config, name):
""" """
Remove OVA image {name} Remove OVA image {name}
@ -469,9 +489,10 @@ def ova_remove(config, name):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def profile_info(config, profile): def profile_info(config, profile):
""" """
Get information about profile Get information about profile
@ -487,6 +508,7 @@ def profile_info(config, profile):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def profile_list(config, limit): def profile_list(config, limit):
""" """
Get list information about profiles (limited by {limit}) Get list information about profiles (limited by {limit})
@ -506,6 +528,7 @@ def profile_list(config, limit):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def profile_add(config, params): def profile_add(config, params):
""" """
Add a new profile with {params} Add a new profile with {params}
@ -520,9 +543,10 @@ def profile_add(config, params):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def profile_modify(config, name, params): def profile_modify(config, name, params):
""" """
Modify profile {name} with {params} Modify profile {name} with {params}
@ -537,9 +561,10 @@ def profile_modify(config, name, params):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def profile_remove(config, name): def profile_remove(config, name):
""" """
Remove profile {name} Remove profile {name}
@ -554,9 +579,10 @@ def profile_remove(config, name):
retvalue = True retvalue = True
else: else:
retvalue = False retvalue = False
return retvalue, response.json().get('message', '') return retvalue, response.json().get('message', '')
def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_args): def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_args):
""" """
Create a new VM named {name} with profile {profile} Create a new VM named {name} with profile {profile}
@ -584,9 +610,10 @@ def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_
else: else:
retvalue = False retvalue = False
retdata = response.json().get('message', '') retdata = response.json().get('message', '')
return retvalue, retdata return retvalue, retdata
def task_status(config, task_id=None, is_watching=False): def task_status(config, task_id=None, is_watching=False):
""" """
Get information about provisioner job {task_id} or all tasks if None Get information about provisioner job {task_id} or all tasks if None
@ -661,6 +688,7 @@ def task_status(config, task_id=None, is_watching=False):
return retvalue, retdata return retvalue, retdata
# #
# Format functions # Format functions
# #
@ -671,12 +699,12 @@ def format_list_template(template_data, template_type=None):
template_type can be used to only display part of the full list, allowing function template_type can be used to only display part of the full list, allowing function
reuse with more limited output options. reuse with more limited output options.
""" """
template_types = [ 'system', 'network', 'storage' ] template_types = ['system', 'network', 'storage']
normalized_template_data = dict() normalized_template_data = dict()
ainformation = list() ainformation = list()
if template_type in template_types: if template_type in template_types:
template_types = [ template_type ] template_types = [template_type]
template_data_type = '{}_templates'.format(template_type) template_data_type = '{}_templates'.format(template_type)
normalized_template_data[template_data_type] = template_data normalized_template_data[template_data_type] = template_data
else: else:
@ -703,9 +731,10 @@ def format_list_template(template_data, template_type=None):
return '\n'.join(ainformation) return '\n'.join(ainformation)
def format_list_template_system(template_data): def format_list_template_system(template_data):
if isinstance(template_data, dict): if isinstance(template_data, dict):
template_data = [ template_data ] template_data = [template_data]
template_list_output = [] template_list_output = []
@ -779,38 +808,34 @@ Meta: {template_node_limit: <{template_node_limit_length}} \
{template_node_selector: <{template_node_selector_length}} \ {template_node_selector: <{template_node_selector_length}} \
{template_node_autostart: <{template_node_autostart_length}} \ {template_node_autostart: <{template_node_autostart_length}} \
{template_migration_method: <{template_migration_method_length}}{end_bold}'.format( {template_migration_method: <{template_migration_method_length}}{end_bold}'.format(
template_name_length=template_name_length, template_name_length=template_name_length,
template_id_length=template_id_length, template_id_length=template_id_length,
template_vcpu_length=template_vcpu_length, template_vcpu_length=template_vcpu_length,
template_vram_length=template_vram_length, template_vram_length=template_vram_length,
template_serial_length=template_serial_length, template_serial_length=template_serial_length,
template_vnc_length=template_vnc_length, template_vnc_length=template_vnc_length,
template_vnc_bind_length=template_vnc_bind_length, template_vnc_bind_length=template_vnc_bind_length,
template_node_limit_length=template_node_limit_length, template_node_limit_length=template_node_limit_length,
template_node_selector_length=template_node_selector_length, template_node_selector_length=template_node_selector_length,
template_node_autostart_length=template_node_autostart_length, template_node_autostart_length=template_node_autostart_length,
template_migration_method_length=template_migration_method_length, template_migration_method_length=template_migration_method_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
template_state_colour='', template_state_colour='',
end_colour='', end_colour='',
template_name='Name', template_name='Name',
template_id='ID', template_id='ID',
template_vcpu='vCPUs', template_vcpu='vCPUs',
template_vram='vRAM [MB]', template_vram='vRAM [MB]',
template_serial='Serial', template_serial='Serial',
template_vnc='VNC', template_vnc='VNC',
template_vnc_bind='VNC bind', template_vnc_bind='VNC bind',
template_node_limit='Limit', template_node_limit='Limit',
template_node_selector='Selector', template_node_selector='Selector',
template_node_autostart='Autostart', template_node_autostart='Autostart',
template_migration_method='Migration' template_migration_method='Migration')
)
# Keep track of nets we found to be valid to cut down on duplicate API hits
valid_net_list = []
# Format the string (elements) # Format the string (elements)
for template in sorted(template_data, key=lambda i: i.get('name', None)): for template in sorted(template_data, key=lambda i: i.get('name', None)):
template_list_output.append( template_list_output.append(
'{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ '{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \
@ -854,9 +879,10 @@ Meta: {template_node_limit: <{template_node_limit_length}} \
return True, '' return True, ''
def format_list_template_network(template_template): def format_list_template_network(template_template):
if isinstance(template_template, dict): if isinstance(template_template, dict):
template_template = [ template_template ] template_template = [template_template]
template_list_output = [] template_list_output = []
@ -895,17 +921,16 @@ def format_list_template_network(template_template):
template_list_output_header = '{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ template_list_output_header = '{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \
{template_mac_template: <{template_mac_template_length}} \ {template_mac_template: <{template_mac_template_length}} \
{template_networks: <{template_networks_length}}{end_bold}'.format( {template_networks: <{template_networks_length}}{end_bold}'.format(
template_name_length=template_name_length, template_name_length=template_name_length,
template_id_length=template_id_length, template_id_length=template_id_length,
template_mac_template_length=template_mac_template_length, template_mac_template_length=template_mac_template_length,
template_networks_length=template_networks_length, template_networks_length=template_networks_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
template_name='Name', template_name='Name',
template_id='ID', template_id='ID',
template_mac_template='MAC template', template_mac_template='MAC template',
template_networks='Network VNIs' template_networks='Network VNIs')
)
# Format the string (elements) # Format the string (elements)
for template in sorted(template_template, key=lambda i: i.get('name', None)): for template in sorted(template_template, key=lambda i: i.get('name', None)):
@ -928,9 +953,10 @@ def format_list_template_network(template_template):
return '\n'.join([template_list_output_header] + template_list_output) return '\n'.join([template_list_output_header] + template_list_output)
def format_list_template_storage(template_template): def format_list_template_storage(template_template):
if isinstance(template_template, dict): if isinstance(template_template, dict):
template_template = [ template_template ] template_template = [template_template]
template_list_output = [] template_list_output = []
@ -994,27 +1020,26 @@ def format_list_template_storage(template_template):
{template_disk_filesystem: <{template_disk_filesystem_length}} \ {template_disk_filesystem: <{template_disk_filesystem_length}} \
{template_disk_fsargs: <{template_disk_fsargs_length}} \ {template_disk_fsargs: <{template_disk_fsargs_length}} \
{template_disk_mountpoint: <{template_disk_mountpoint_length}}{end_bold}'.format( {template_disk_mountpoint: <{template_disk_mountpoint_length}}{end_bold}'.format(
template_name_length=template_name_length, template_name_length=template_name_length,
template_id_length=template_id_length, template_id_length=template_id_length,
template_disk_id_length=template_disk_id_length, template_disk_id_length=template_disk_id_length,
template_disk_pool_length=template_disk_pool_length, template_disk_pool_length=template_disk_pool_length,
template_disk_source_length=template_disk_source_length, template_disk_source_length=template_disk_source_length,
template_disk_size_length=template_disk_size_length, template_disk_size_length=template_disk_size_length,
template_disk_filesystem_length=template_disk_filesystem_length, template_disk_filesystem_length=template_disk_filesystem_length,
template_disk_fsargs_length=template_disk_fsargs_length, template_disk_fsargs_length=template_disk_fsargs_length,
template_disk_mountpoint_length=template_disk_mountpoint_length, template_disk_mountpoint_length=template_disk_mountpoint_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
template_name='Name', template_name='Name',
template_id='ID', template_id='ID',
template_disk_id='Disk ID', template_disk_id='Disk ID',
template_disk_pool='Pool', template_disk_pool='Pool',
template_disk_source='Source Volume', template_disk_source='Source Volume',
template_disk_size='Size [GB]', template_disk_size='Size [GB]',
template_disk_filesystem='Filesystem', template_disk_filesystem='Filesystem',
template_disk_fsargs='Arguments', template_disk_fsargs='Arguments',
template_disk_mountpoint='Mountpoint' template_disk_mountpoint='Mountpoint')
)
# Format the string (elements) # Format the string (elements)
for template in sorted(template_template, key=lambda i: i.get('name', None)): for template in sorted(template_template, key=lambda i: i.get('name', None)):
@ -1063,16 +1088,16 @@ def format_list_template_storage(template_template):
return '\n'.join([template_list_output_header] + template_list_output) return '\n'.join([template_list_output_header] + template_list_output)
def format_list_userdata(userdata_data, lines=None): def format_list_userdata(userdata_data, lines=None):
if isinstance(userdata_data, dict): if isinstance(userdata_data, dict):
userdata_data = [ userdata_data ] userdata_data = [userdata_data]
userdata_list_output = [] userdata_list_output = []
# Determine optimal column widths # Determine optimal column widths
userdata_name_length = 5 userdata_name_length = 5
userdata_id_length = 3 userdata_id_length = 3
userdata_useruserdata_length = 8
for userdata in userdata_data: for userdata in userdata_data:
# userdata_name column # userdata_name column
@ -1087,14 +1112,13 @@ def format_list_userdata(userdata_data, lines=None):
# Format the string (header) # Format the string (header)
userdata_list_output_header = '{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ userdata_list_output_header = '{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \
{userdata_data}{end_bold}'.format( {userdata_data}{end_bold}'.format(
userdata_name_length=userdata_name_length, userdata_name_length=userdata_name_length,
userdata_id_length=userdata_id_length, userdata_id_length=userdata_id_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
userdata_name='Name', userdata_name='Name',
userdata_id='ID', userdata_id='ID',
userdata_data='Document' userdata_data='Document')
)
# Format the string (elements) # Format the string (elements)
for data in sorted(userdata_data, key=lambda i: i.get('name', None)): for data in sorted(userdata_data, key=lambda i: i.get('name', None)):
@ -1138,16 +1162,16 @@ def format_list_userdata(userdata_data, lines=None):
return '\n'.join([userdata_list_output_header] + userdata_list_output) return '\n'.join([userdata_list_output_header] + userdata_list_output)
def format_list_script(script_data, lines=None): def format_list_script(script_data, lines=None):
if isinstance(script_data, dict): if isinstance(script_data, dict):
script_data = [ script_data ] script_data = [script_data]
script_list_output = [] script_list_output = []
# Determine optimal column widths # Determine optimal column widths
script_name_length = 5 script_name_length = 5
script_id_length = 3 script_id_length = 3
script_script_length = 8
for script in script_data: for script in script_data:
# script_name column # script_name column
@ -1162,14 +1186,13 @@ def format_list_script(script_data, lines=None):
# Format the string (header) # Format the string (header)
script_list_output_header = '{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \ script_list_output_header = '{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \
{script_data}{end_bold}'.format( {script_data}{end_bold}'.format(
script_name_length=script_name_length, script_name_length=script_name_length,
script_id_length=script_id_length, script_id_length=script_id_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
script_name='Name', script_name='Name',
script_id='ID', script_id='ID',
script_data='Script' script_data='Script')
)
# Format the string (elements) # Format the string (elements)
for script in sorted(script_data, key=lambda i: i.get('name', None)): for script in sorted(script_data, key=lambda i: i.get('name', None)):
@ -1213,9 +1236,10 @@ def format_list_script(script_data, lines=None):
return '\n'.join([script_list_output_header] + script_list_output) return '\n'.join([script_list_output_header] + script_list_output)
def format_list_ova(ova_data): def format_list_ova(ova_data):
if isinstance(ova_data, dict): if isinstance(ova_data, dict):
ova_data = [ ova_data ] ova_data = [ova_data]
ova_list_output = [] ova_list_output = []
@ -1267,23 +1291,22 @@ def format_list_ova(ova_data):
{ova_disk_pool: <{ova_disk_pool_length}} \ {ova_disk_pool: <{ova_disk_pool_length}} \
{ova_disk_volume_format: <{ova_disk_volume_format_length}} \ {ova_disk_volume_format: <{ova_disk_volume_format_length}} \
{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}'.format( {ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}'.format(
ova_name_length=ova_name_length, ova_name_length=ova_name_length,
ova_id_length=ova_id_length, ova_id_length=ova_id_length,
ova_disk_id_length=ova_disk_id_length, ova_disk_id_length=ova_disk_id_length,
ova_disk_pool_length=ova_disk_pool_length, ova_disk_pool_length=ova_disk_pool_length,
ova_disk_size_length=ova_disk_size_length, ova_disk_size_length=ova_disk_size_length,
ova_disk_volume_format_length=ova_disk_volume_format_length, ova_disk_volume_format_length=ova_disk_volume_format_length,
ova_disk_volume_name_length=ova_disk_volume_name_length, ova_disk_volume_name_length=ova_disk_volume_name_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
ova_name='Name', ova_name='Name',
ova_id='ID', ova_id='ID',
ova_disk_id='Disk ID', ova_disk_id='Disk ID',
ova_disk_size='Size [GB]', ova_disk_size='Size [GB]',
ova_disk_pool='Pool', ova_disk_pool='Pool',
ova_disk_volume_format='Format', ova_disk_volume_format='Format',
ova_disk_volume_name='Source Volume', ova_disk_volume_name='Source Volume')
)
# Format the string (elements) # Format the string (elements)
for ova in sorted(ova_data, key=lambda i: i.get('name', None)): for ova in sorted(ova_data, key=lambda i: i.get('name', None)):
@ -1326,9 +1349,10 @@ def format_list_ova(ova_data):
return '\n'.join([ova_list_output_header] + ova_list_output) return '\n'.join([ova_list_output_header] + ova_list_output)
def format_list_profile(profile_data): def format_list_profile(profile_data):
if isinstance(profile_data, dict): if isinstance(profile_data, dict):
profile_data = [ profile_data ] profile_data = [profile_data]
# Format the profile "source" from the type and, if applicable, OVA profile name # Format the profile "source" from the type and, if applicable, OVA profile name
for profile in profile_data: for profile in profile_data:
@ -1395,26 +1419,25 @@ Templates: {profile_system_template: <{profile_system_template_length}} \
Data: {profile_userdata: <{profile_userdata_length}} \ Data: {profile_userdata: <{profile_userdata_length}} \
{profile_script: <{profile_script_length}} \ {profile_script: <{profile_script_length}} \
{profile_arguments}{end_bold}'.format( {profile_arguments}{end_bold}'.format(
profile_name_length=profile_name_length, profile_name_length=profile_name_length,
profile_id_length=profile_id_length, profile_id_length=profile_id_length,
profile_source_length=profile_source_length, profile_source_length=profile_source_length,
profile_system_template_length=profile_system_template_length, profile_system_template_length=profile_system_template_length,
profile_network_template_length=profile_network_template_length, profile_network_template_length=profile_network_template_length,
profile_storage_template_length=profile_storage_template_length, profile_storage_template_length=profile_storage_template_length,
profile_userdata_length=profile_userdata_length, profile_userdata_length=profile_userdata_length,
profile_script_length=profile_script_length, profile_script_length=profile_script_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
profile_name='Name', profile_name='Name',
profile_id='ID', profile_id='ID',
profile_source='Source', profile_source='Source',
profile_system_template='System', profile_system_template='System',
profile_network_template='Network', profile_network_template='Network',
profile_storage_template='Storage', profile_storage_template='Storage',
profile_userdata='Userdata', profile_userdata='Userdata',
profile_script='Script', profile_script='Script',
profile_arguments='Script Arguments' profile_arguments='Script Arguments')
)
# Format the string (elements) # Format the string (elements)
for profile in sorted(profile_data, key=lambda i: i.get('name', None)): for profile in sorted(profile_data, key=lambda i: i.get('name', None)):
@ -1450,6 +1473,7 @@ Data: {profile_userdata: <{profile_userdata_length}} \
return '\n'.join([profile_list_output_header] + profile_list_output) return '\n'.join([profile_list_output_header] + profile_list_output)
def format_list_task(task_data): def format_list_task(task_data):
task_list_output = [] task_list_output = []
@ -1499,23 +1523,22 @@ VM: {task_vm_name: <{task_vm_name_length}} \
{task_vm_profile: <{task_vm_profile_length}} \ {task_vm_profile: <{task_vm_profile_length}} \
{task_vm_define: <{task_vm_define_length}} \ {task_vm_define: <{task_vm_define_length}} \
{task_vm_start: <{task_vm_start_length}}{end_bold}'.format( {task_vm_start: <{task_vm_start_length}}{end_bold}'.format(
task_id_length=task_id_length, task_id_length=task_id_length,
task_type_length=task_type_length, task_type_length=task_type_length,
task_worker_length=task_worker_length, task_worker_length=task_worker_length,
task_vm_name_length=task_vm_name_length, task_vm_name_length=task_vm_name_length,
task_vm_profile_length=task_vm_profile_length, task_vm_profile_length=task_vm_profile_length,
task_vm_define_length=task_vm_define_length, task_vm_define_length=task_vm_define_length,
task_vm_start_length=task_vm_start_length, task_vm_start_length=task_vm_start_length,
bold=ansiprint.bold(), bold=ansiprint.bold(),
end_bold=ansiprint.end(), end_bold=ansiprint.end(),
task_id='Job ID', task_id='Job ID',
task_type='Status', task_type='Status',
task_worker='Worker', task_worker='Worker',
task_vm_name='Name', task_vm_name='Name',
task_vm_profile='Profile', task_vm_profile='Profile',
task_vm_define='Define?', task_vm_define='Define?',
task_vm_start='Start?' task_vm_start='Start?')
)
# Format the string (elements) # Format the string (elements)
for task in sorted(task_data, key=lambda i: i.get('type', None)): for task in sorted(task_data, key=lambda i: i.get('type', None)):

View File

@ -22,14 +22,11 @@
import time import time
import re import re
import subprocess
from collections import deque
import cli_lib.ansiprint as ansiprint import cli_lib.ansiprint as ansiprint
import cli_lib.ceph as ceph
from cli_lib.common import call_api, format_bytes, format_metric from cli_lib.common import call_api, format_bytes, format_metric
# #
# Primary functions # Primary functions
# #
@ -57,6 +54,7 @@ def vm_info(config, vm):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def vm_list(config, limit, target_node, target_state): def vm_list(config, limit, target_node, target_state):
""" """
Get list information about VMs (limited by {limit}, {target_node}, or {target_state}) Get list information about VMs (limited by {limit}, {target_node}, or {target_state})
@ -80,6 +78,7 @@ def vm_list(config, limit, target_node, target_state):
else: else:
return False, response.json().get('message', '') return False, response.json().get('message', '')
def vm_define(config, xml, node, node_limit, node_selector, node_autostart, migration_method): def vm_define(config, xml, node, node_limit, node_selector, node_autostart, migration_method):
""" """
Define a new VM on the cluster Define a new VM on the cluster
@ -107,6 +106,7 @@ def vm_define(config, xml, node, node_limit, node_selector, node_autostart, migr
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def vm_modify(config, vm, xml, restart): def vm_modify(config, vm, xml, restart):
""" """
Modify the configuration of VM Modify the configuration of VM
@ -130,6 +130,7 @@ def vm_modify(config, vm, xml, restart):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def vm_metadata(config, vm, node_limit, node_selector, node_autostart, migration_method, provisioner_profile): def vm_metadata(config, vm, node_limit, node_selector, node_autostart, migration_method, provisioner_profile):
""" """
Modify PVC metadata of a VM Modify PVC metadata of a VM
@ -166,6 +167,7 @@ def vm_metadata(config, vm, node_limit, node_selector, node_autostart, migration
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def vm_remove(config, vm, delete_disks=False): def vm_remove(config, vm, delete_disks=False):
""" """
Remove a VM Remove a VM
@ -174,7 +176,7 @@ def vm_remove(config, vm, delete_disks=False):
API arguments: delete_disks={delete_disks} API arguments: delete_disks={delete_disks}
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
""" """
params={ params = {
'delete_disks': delete_disks 'delete_disks': delete_disks
} }
response = call_api(config, 'delete', '/vm/{vm}'.format(vm=vm), params=params) response = call_api(config, 'delete', '/vm/{vm}'.format(vm=vm), params=params)
@ -186,6 +188,7 @@ def vm_remove(config, vm, delete_disks=False):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def vm_state(config, vm, target_state, wait=False): def vm_state(config, vm, target_state, wait=False):
""" """
Modify the current state of VM Modify the current state of VM
@ -194,7 +197,7 @@ def vm_state(config, vm, target_state, wait=False):
API arguments: state={state}, wait={wait} API arguments: state={state}, wait={wait}
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
""" """
params={ params = {
'state': target_state, 'state': target_state,
'wait': str(wait).lower() 'wait': str(wait).lower()
} }
@ -207,6 +210,7 @@ def vm_state(config, vm, target_state, wait=False):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def vm_node(config, vm, target_node, action, force=False, wait=False, force_live=False): def vm_node(config, vm, target_node, action, force=False, wait=False, force_live=False):
""" """
Modify the current node of VM via {action} Modify the current node of VM via {action}
@ -215,7 +219,7 @@ def vm_node(config, vm, target_node, action, force=False, wait=False, force_live
API arguments: node={target_node}, action={action}, force={force}, wait={wait}, force_live={force_live} API arguments: node={target_node}, action={action}, force={force}, wait={wait}, force_live={force_live}
API schema: {"message":"{data}"} API schema: {"message":"{data}"}
""" """
params={ params = {
'node': target_node, 'node': target_node,
'action': action, 'action': action,
'force': str(force).lower(), 'force': str(force).lower(),
@ -231,6 +235,7 @@ def vm_node(config, vm, target_node, action, force=False, wait=False, force_live
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def vm_locks(config, vm): def vm_locks(config, vm):
""" """
Flush RBD locks of (stopped) VM Flush RBD locks of (stopped) VM
@ -248,6 +253,7 @@ def vm_locks(config, vm):
return retstatus, response.json().get('message', '') return retstatus, response.json().get('message', '')
def view_console_log(config, vm, lines=100): def view_console_log(config, vm, lines=100):
""" """
Return console log lines from the API (and display them in a pager in the main CLI) Return console log lines from the API (and display them in a pager in the main CLI)
@ -272,6 +278,7 @@ def view_console_log(config, vm, lines=100):
return True, loglines return True, loglines
def follow_console_log(config, vm, lines=10): def follow_console_log(config, vm, lines=10):
""" """
Return and follow console log lines from the API Return and follow console log lines from the API
@ -301,7 +308,7 @@ def follow_console_log(config, vm, lines=10):
try: try:
response = call_api(config, 'get', '/vm/{vm}/console'.format(vm=vm), params=params) response = call_api(config, 'get', '/vm/{vm}/console'.format(vm=vm), params=params)
new_console_log = response.json()['data'] new_console_log = response.json()['data']
except: except Exception:
break break
# Split the new and old log strings into constitutent lines # Split the new and old log strings into constitutent lines
old_console_loglines = console_log.split('\n') old_console_loglines = console_log.split('\n')
@ -327,6 +334,7 @@ def follow_console_log(config, vm, lines=10):
return True, '' return True, ''
# #
# Output display functions # Output display functions
# #
@ -344,7 +352,7 @@ def format_info(config, domain_information, long_output):
ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu'])) ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu']))
ainformation.append('{}Topology (S/C/T):{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu_topology'])) ainformation.append('{}Topology (S/C/T):{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu_topology']))
if long_output == True: if long_output is True:
# Virtualization information # Virtualization information
ainformation.append('') ainformation.append('')
ainformation.append('{}Emulator:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['emulator'])) ainformation.append('{}Emulator:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['emulator']))
@ -358,10 +366,10 @@ def format_info(config, domain_information, long_output):
format_metric(domain_information['memory_stats'].get('swap_in')), format_metric(domain_information['memory_stats'].get('swap_in')),
format_metric(domain_information['memory_stats'].get('swap_out')), format_metric(domain_information['memory_stats'].get('swap_out')),
'/'.join([format_metric(domain_information['memory_stats'].get('major_fault')), format_metric(domain_information['memory_stats'].get('minor_fault'))]), '/'.join([format_metric(domain_information['memory_stats'].get('major_fault')), format_metric(domain_information['memory_stats'].get('minor_fault'))]),
format_bytes(domain_information['memory_stats'].get('available')*1024), format_bytes(domain_information['memory_stats'].get('available') * 1024),
format_bytes(domain_information['memory_stats'].get('usable')*1024), format_bytes(domain_information['memory_stats'].get('usable') * 1024),
format_bytes(domain_information['memory_stats'].get('unused')*1024), format_bytes(domain_information['memory_stats'].get('unused') * 1024),
format_bytes(domain_information['memory_stats'].get('rss')*1024) format_bytes(domain_information['memory_stats'].get('rss') * 1024)
)) ))
ainformation.append('') ainformation.append('')
ainformation.append('{0}vCPU stats:{1} {2}CPU time (ns) User time (ns) System time (ns){3}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end())) ainformation.append('{0}vCPU stats:{1} {2}CPU time (ns) User time (ns) System time (ns){3}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
@ -439,7 +447,7 @@ def format_info(config, domain_information, long_output):
ainformation.append('') ainformation.append('')
ainformation.append('{}Networks:{} {}'.format(ansiprint.purple(), ansiprint.end(), ', '.join(net_list))) ainformation.append('{}Networks:{} {}'.format(ansiprint.purple(), ansiprint.end(), ', '.join(net_list)))
if long_output == True: if long_output is True:
# Disk list # Disk list
ainformation.append('') ainformation.append('')
name_length = 0 name_length = 0
@ -482,10 +490,11 @@ def format_info(config, domain_information, long_output):
ainformation.append('') ainformation.append('')
return '\n'.join(ainformation) return '\n'.join(ainformation)
def format_list(config, vm_list, raw): def format_list(config, vm_list, raw):
# Handle single-element lists # Handle single-element lists
if not isinstance(vm_list, list): if not isinstance(vm_list, list):
vm_list = [ vm_list ] vm_list = [vm_list]
# Function to strip the "br" off of nets and return a nicer list # Function to strip the "br" off of nets and return a nicer list
def getNiceNetID(domain_information): def getNiceNetID(domain_information):
@ -573,7 +582,7 @@ def format_list(config, vm_list, raw):
vm_migrated='Migrated' vm_migrated='Migrated'
) )
) )
# Keep track of nets we found to be valid to cut down on duplicate API hits # Keep track of nets we found to be valid to cut down on duplicate API hits
valid_net_list = [] valid_net_list = []
# Format the string (elements) # Format the string (elements)
@ -596,7 +605,7 @@ def format_list(config, vm_list, raw):
net_list = [] net_list = []
vm_net_colour = '' vm_net_colour = ''
for net_vni in raw_net_list: for net_vni in raw_net_list:
if not net_vni in valid_net_list: if net_vni not in valid_net_list:
response = call_api(config, 'get', '/network/{net}'.format(net=net_vni)) response = call_api(config, 'get', '/network/{net}'.format(net=net_vni))
if response.status_code != 200 and net_vni not in ['cluster', 'storage', 'upstream']: if response.status_code != 200 and net_vni not in ['cluster', 'storage', 'upstream']:
vm_net_colour = ansiprint.red() vm_net_colour = ansiprint.red()

View File

@ -20,10 +20,8 @@
# #
############################################################################### ###############################################################################
import kazoo.client
import uuid import uuid
import daemon_lib.ansiprint as ansiprint
# Exists function # Exists function
def exists(zk_conn, key): def exists(zk_conn, key):
@ -33,22 +31,25 @@ def exists(zk_conn, key):
else: else:
return False return False
# Child list function # Child list function
def listchildren(zk_conn, key): def listchildren(zk_conn, key):
children = zk_conn.get_children(key) children = zk_conn.get_children(key)
return children return children
# Delete key function # Delete key function
def deletekey(zk_conn, key, recursive=True): def deletekey(zk_conn, key, recursive=True):
zk_conn.delete(key, recursive=recursive) zk_conn.delete(key, recursive=recursive)
# Data read function # Data read function
def readdata(zk_conn, key): def readdata(zk_conn, key):
data_raw = zk_conn.get(key) data_raw = zk_conn.get(key)
data = data_raw[0].decode('utf8') data = data_raw[0].decode('utf8')
meta = data_raw[1]
return data return data
# Data write function # Data write function
def writedata(zk_conn, kv): def writedata(zk_conn, kv):
# Start up a transaction # Start up a transaction
@ -87,12 +88,14 @@ def writedata(zk_conn, kv):
except Exception: except Exception:
return False return False
# Write lock function # Write lock function
def writelock(zk_conn, key): def writelock(zk_conn, key):
lock_id = str(uuid.uuid1()) lock_id = str(uuid.uuid1())
lock = zk_conn.WriteLock('{}'.format(key), lock_id) lock = zk_conn.WriteLock('{}'.format(key), lock_id)
return lock return lock
# Read lock function # Read lock function
def readlock(zk_conn, key): def readlock(zk_conn, key):
lock_id = str(uuid.uuid1()) lock_id = str(uuid.uuid1())

File diff suppressed because it is too large Load Diff

View File

@ -1,82 +0,0 @@
#!/usr/bin/env python3
# ansiprint.py - Printing function for formatted messages
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
import datetime
# ANSII colours for output
def red():
return '\033[91m'
def blue():
return '\033[94m'
def cyan():
return '\033[96m'
def green():
return '\033[92m'
def yellow():
return '\033[93m'
def purple():
return '\033[95m'
def bold():
return '\033[1m'
def end():
return '\033[0m'
# Print function
def echo(message, prefix, state):
# Get the date
date = '{} - '.format(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S.%f'))
endc = end()
# Continuation
if state == 'c':
date = ''
colour = ''
prompt = ' '
# OK
elif state == 'o':
colour = green()
prompt = '>>> '
# Error
elif state == 'e':
colour = red()
prompt = '>>> '
# Warning
elif state == 'w':
colour = yellow()
prompt = '>>> '
# Tick
elif state == 't':
colour = purple()
prompt = '>>> '
# Information
elif state == 'i':
colour = blue()
prompt = '>>> '
else:
colour = bold()
prompt = '>>> '
# Append space to prefix
if prefix != '':
prefix = prefix + ' '
print(colour + prompt + endc + date + prefix + message)

File diff suppressed because it is too large Load Diff

View File

@ -20,12 +20,8 @@
# #
############################################################################### ###############################################################################
import json
import re import re
from distutils.util import strtobool
import daemon_lib.ansiprint as ansiprint
import daemon_lib.zkhandler as zkhandler import daemon_lib.zkhandler as zkhandler
import daemon_lib.common as common import daemon_lib.common as common
import daemon_lib.vm as pvc_vm import daemon_lib.vm as pvc_vm
@ -33,6 +29,7 @@ import daemon_lib.node as pvc_node
import daemon_lib.network as pvc_network import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph import daemon_lib.ceph as pvc_ceph
def set_maintenance(zk_conn, maint_state): def set_maintenance(zk_conn, maint_state):
try: try:
if maint_state == 'true': if maint_state == 'true':
@ -41,14 +38,15 @@ def set_maintenance(zk_conn, maint_state):
else: else:
zkhandler.writedata(zk_conn, {'/maintenance': 'false'}) zkhandler.writedata(zk_conn, {'/maintenance': 'false'})
return True, 'Successfully set cluster in normal mode' return True, 'Successfully set cluster in normal mode'
except: except Exception:
return False, 'Failed to set cluster maintenance state' return False, 'Failed to set cluster maintenance state'
def getClusterInformation(zk_conn): def getClusterInformation(zk_conn):
# Get cluster maintenance state # Get cluster maintenance state
try: try:
maint_state = zkhandler.readdata(zk_conn, '/maintenance') maint_state = zkhandler.readdata(zk_conn, '/maintenance')
except: except Exception:
maint_state = 'false' maint_state = 'false'
# List of messages to display to the clients # List of messages to display to the clients
@ -120,7 +118,7 @@ def getClusterInformation(zk_conn):
cluster_health_msg.append("Node '{}' in {},{} state".format(node['name'], daemon_state, domain_state)) cluster_health_msg.append("Node '{}' in {},{} state".format(node['name'], daemon_state, domain_state))
else: else:
node_healthy_status[index] = True node_healthy_status[index] = True
node_report_status[index] = daemon_state + ',' + domain_state node_report_status[index] = daemon_state + ',' + domain_state
# Determinations for VM health # Determinations for VM health
vm_healthy_status = list(range(0, vm_count)) vm_healthy_status = list(range(0, vm_count))
@ -148,8 +146,8 @@ def getClusterInformation(zk_conn):
except KeyError: except KeyError:
ceph_osd_in = 0 ceph_osd_in = 0
up_texts = { 1: 'up', 0: 'down' } up_texts = {1: 'up', 0: 'down'}
in_texts = { 1: 'in', 0: 'out' } in_texts = {1: 'in', 0: 'out'}
if not ceph_osd_up or not ceph_osd_in: if not ceph_osd_up or not ceph_osd_in:
ceph_osd_healthy_status[index] = False ceph_osd_healthy_status[index] = False
@ -250,6 +248,7 @@ def getClusterInformation(zk_conn):
return cluster_information return cluster_information
def get_info(zk_conn): def get_info(zk_conn):
# This is a thin wrapper function for naming purposes # This is a thin wrapper function for naming purposes
cluster_information = getClusterInformation(zk_conn) cluster_information = getClusterInformation(zk_conn)

View File

@ -20,9 +20,9 @@
# #
############################################################################### ###############################################################################
import time
import uuid import uuid
import lxml import lxml
import math
import shlex import shlex
import subprocess import subprocess
import kazoo.client import kazoo.client
@ -36,6 +36,7 @@ import daemon_lib.zkhandler as zkhandler
# Supplemental functions # Supplemental functions
############################################################################### ###############################################################################
# #
# Run a local OS command via shell # Run a local OS command via shell
# #
@ -56,14 +57,15 @@ def run_os_command(command_string, background=False, environment=None, timeout=N
try: try:
stdout = command_output.stdout.decode('ascii') stdout = command_output.stdout.decode('ascii')
except: except Exception:
stdout = '' stdout = ''
try: try:
stderr = command_output.stderr.decode('ascii') stderr = command_output.stderr.decode('ascii')
except: except Exception:
stderr = '' stderr = ''
return retcode, stdout, stderr return retcode, stdout, stderr
# #
# Validate a UUID # Validate a UUID
# #
@ -71,9 +73,10 @@ def validateUUID(dom_uuid):
try: try:
uuid.UUID(dom_uuid) uuid.UUID(dom_uuid)
return True return True
except: except Exception:
return False return False
# #
# Connect and disconnect from Zookeeper # Connect and disconnect from Zookeeper
# #
@ -89,23 +92,27 @@ def startZKConnection(zk_host):
exit(1) exit(1)
return zk_conn return zk_conn
def stopZKConnection(zk_conn): def stopZKConnection(zk_conn):
zk_conn.stop() zk_conn.stop()
zk_conn.close() zk_conn.close()
return 0 return 0
# #
# Parse a Domain XML object # Parse a Domain XML object
# #
def getDomainXML(zk_conn, dom_uuid): def getDomainXML(zk_conn, dom_uuid):
try: try:
xml = zkhandler.readdata(zk_conn, '/domains/{}/xml'.format(dom_uuid)) xml = zkhandler.readdata(zk_conn, '/domains/{}/xml'.format(dom_uuid))
except: except Exception:
return None return None
# Parse XML using lxml.objectify # Parse XML using lxml.objectify
parsed_xml = lxml.objectify.fromstring(xml) parsed_xml = lxml.objectify.fromstring(xml)
return parsed_xml return parsed_xml
# #
# Get the main details for a VM object from XML # Get the main details for a VM object from XML
# #
@ -126,11 +133,12 @@ def getDomainMainDetails(parsed_xml):
dvcpu = str(parsed_xml.vcpu) dvcpu = str(parsed_xml.vcpu)
try: try:
dvcputopo = '{}/{}/{}'.format(parsed_xml.cpu.topology.attrib.get('sockets'), parsed_xml.cpu.topology.attrib.get('cores'), parsed_xml.cpu.topology.attrib.get('threads')) dvcputopo = '{}/{}/{}'.format(parsed_xml.cpu.topology.attrib.get('sockets'), parsed_xml.cpu.topology.attrib.get('cores'), parsed_xml.cpu.topology.attrib.get('threads'))
except: except Exception:
dvcputopo = 'N/A' dvcputopo = 'N/A'
return duuid, dname, ddescription, dmemory, dvcpu, dvcputopo return duuid, dname, ddescription, dmemory, dvcpu, dvcputopo
# #
# Get long-format details # Get long-format details
# #
@ -143,6 +151,7 @@ def getDomainExtraDetails(parsed_xml):
return dtype, darch, dmachine, dconsole, demulator return dtype, darch, dmachine, dconsole, demulator
# #
# Get CPU features # Get CPU features
# #
@ -151,11 +160,12 @@ def getDomainCPUFeatures(parsed_xml):
try: try:
for feature in parsed_xml.features.getchildren(): for feature in parsed_xml.features.getchildren():
dfeatures.append(feature.tag) dfeatures.append(feature.tag)
except: except Exception:
pass pass
return dfeatures return dfeatures
# #
# Get disk devices # Get disk devices
# #
@ -169,7 +179,7 @@ def getDomainDisks(parsed_xml, stats_data):
disk_stats_list = [x for x in stats_data.get('disk_stats', []) if x.get('name') == disk_attrib.get('name')] disk_stats_list = [x for x in stats_data.get('disk_stats', []) if x.get('name') == disk_attrib.get('name')]
try: try:
disk_stats = disk_stats_list[0] disk_stats = disk_stats_list[0]
except: except Exception:
disk_stats = {} disk_stats = {}
if disk_type == 'network': if disk_type == 'network':
@ -200,6 +210,7 @@ def getDomainDisks(parsed_xml, stats_data):
return ddisks return ddisks
# #
# Get a list of disk devices # Get a list of disk devices
# #
@ -208,9 +219,10 @@ def getDomainDiskList(zk_conn, dom_uuid):
disk_list = [] disk_list = []
for disk in domain_information['disks']: for disk in domain_information['disks']:
disk_list.append(disk['name']) disk_list.append(disk['name'])
return disk_list return disk_list
# #
# Get domain information from XML # Get domain information from XML
# #
@ -226,19 +238,19 @@ def getInformationFromXML(zk_conn, uuid):
try: try:
domain_node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(uuid)) domain_node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(uuid))
except: except Exception:
domain_node_limit = None domain_node_limit = None
try: try:
domain_node_selector = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(uuid)) domain_node_selector = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(uuid))
except: except Exception:
domain_node_selector = None domain_node_selector = None
try: try:
domain_node_autostart = zkhandler.readdata(zk_conn, '/domains/{}/node_autostart'.format(uuid)) domain_node_autostart = zkhandler.readdata(zk_conn, '/domains/{}/node_autostart'.format(uuid))
except: except Exception:
domain_node_autostart = None domain_node_autostart = None
try: try:
domain_migration_method = zkhandler.readdata(zk_conn, '/domains/{}/migration_method'.format(uuid)) domain_migration_method = zkhandler.readdata(zk_conn, '/domains/{}/migration_method'.format(uuid))
except: except Exception:
domain_migration_method = None domain_migration_method = None
if not domain_node_limit: if not domain_node_limit:
@ -251,14 +263,14 @@ def getInformationFromXML(zk_conn, uuid):
try: try:
domain_profile = zkhandler.readdata(zk_conn, '/domains/{}/profile'.format(uuid)) domain_profile = zkhandler.readdata(zk_conn, '/domains/{}/profile'.format(uuid))
except: except Exception:
domain_profile = None domain_profile = None
parsed_xml = getDomainXML(zk_conn, uuid) parsed_xml = getDomainXML(zk_conn, uuid)
try: try:
stats_data = loads(zkhandler.readdata(zk_conn, '/domains/{}/stats'.format(uuid))) stats_data = loads(zkhandler.readdata(zk_conn, '/domains/{}/stats'.format(uuid)))
except: except Exception:
stats_data = {} stats_data = {}
domain_uuid, domain_name, domain_description, domain_memory, domain_vcpu, domain_vcputopo = getDomainMainDetails(parsed_xml) domain_uuid, domain_name, domain_description, domain_memory, domain_vcpu, domain_vcputopo = getDomainMainDetails(parsed_xml)
@ -269,7 +281,7 @@ def getInformationFromXML(zk_conn, uuid):
domain_features = getDomainCPUFeatures(parsed_xml) domain_features = getDomainCPUFeatures(parsed_xml)
domain_disks = getDomainDisks(parsed_xml, stats_data) domain_disks = getDomainDisks(parsed_xml, stats_data)
domain_controllers = getDomainControllers(parsed_xml) domain_controllers = getDomainControllers(parsed_xml)
if domain_lastnode: if domain_lastnode:
domain_migrated = 'from {}'.format(domain_lastnode) domain_migrated = 'from {}'.format(domain_lastnode)
else: else:
@ -308,6 +320,7 @@ def getInformationFromXML(zk_conn, uuid):
return domain_information return domain_information
# #
# Get network devices # Get network devices
# #
@ -317,24 +330,24 @@ def getDomainNetworks(parsed_xml, stats_data):
if device.tag == 'interface': if device.tag == 'interface':
try: try:
net_type = device.attrib.get('type') net_type = device.attrib.get('type')
except: except Exception:
net_type = None net_type = None
try: try:
net_mac = device.mac.attrib.get('address') net_mac = device.mac.attrib.get('address')
except: except Exception:
net_mac = None net_mac = None
try: try:
net_bridge = device.source.attrib.get(net_type) net_bridge = device.source.attrib.get(net_type)
except: except Exception:
net_bridge = None net_bridge = None
try: try:
net_model = device.model.attrib.get('type') net_model = device.model.attrib.get('type')
except: except Exception:
net_model = None net_model = None
try: try:
net_stats_list = [x for x in stats_data.get('net_stats', []) if x.get('bridge') == net_bridge] net_stats_list = [x for x in stats_data.get('net_stats', []) if x.get('bridge') == net_bridge]
net_stats = net_stats_list[0] net_stats = net_stats_list[0]
except: except Exception:
net_stats = {} net_stats = {}
net_rd_bytes = net_stats.get('rd_bytes', 0) net_rd_bytes = net_stats.get('rd_bytes', 0)
net_rd_packets = net_stats.get('rd_packets', 0) net_rd_packets = net_stats.get('rd_packets', 0)
@ -362,6 +375,7 @@ def getDomainNetworks(parsed_xml, stats_data):
return dnets return dnets
# #
# Get controller devices # Get controller devices
# #
@ -374,11 +388,12 @@ def getDomainControllers(parsed_xml):
controller_model = device.attrib.get('model') controller_model = device.attrib.get('model')
except KeyError: except KeyError:
controller_model = 'none' controller_model = 'none'
controller_obj = { 'type': controller_type, 'model': controller_model } controller_obj = {'type': controller_type, 'model': controller_model}
dcontrollers.append(controller_obj) dcontrollers.append(controller_obj)
return dcontrollers return dcontrollers
# #
# Verify node is valid in cluster # Verify node is valid in cluster
# #
@ -388,6 +403,7 @@ def verifyNode(zk_conn, node):
else: else:
return False return False
# #
# Get the primary coordinator node # Get the primary coordinator node
# #
@ -396,7 +412,7 @@ def getPrimaryNode(zk_conn):
while True: while True:
try: try:
primary_node = zkhandler.readdata(zk_conn, '/primary_node') primary_node = zkhandler.readdata(zk_conn, '/primary_node')
except: except Exception:
primary_node == 'none' primary_node == 'none'
if primary_node == 'none': if primary_node == 'none':
@ -412,6 +428,7 @@ def getPrimaryNode(zk_conn):
return primary_node return primary_node
# #
# Find a migration target # Find a migration target
# #
@ -421,13 +438,13 @@ def findTargetNode(zk_conn, dom_uuid):
node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',') node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
if not any(node_limit): if not any(node_limit):
node_limit = None node_limit = None
except: except Exception:
node_limit = None node_limit = None
# Determine VM search field or use default; set config value if read fails # Determine VM search field or use default; set config value if read fails
try: try:
search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid)) search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid))
except: except Exception:
search_field = 'mem' search_field = 'mem'
# Execute the search # Execute the search
@ -443,6 +460,7 @@ def findTargetNode(zk_conn, dom_uuid):
# Nothing was found # Nothing was found
return None return None
# Get the list of valid target nodes # Get the list of valid target nodes
def getNodes(zk_conn, node_limit, dom_uuid): def getNodes(zk_conn, node_limit, dom_uuid):
valid_node_list = [] valid_node_list = []
@ -469,6 +487,7 @@ def getNodes(zk_conn, node_limit, dom_uuid):
return valid_node_list return valid_node_list
# via free memory (relative to allocated memory) # via free memory (relative to allocated memory)
def findTargetNodeMem(zk_conn, node_limit, dom_uuid): def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
most_provfree = 0 most_provfree = 0
@ -488,6 +507,7 @@ def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
return target_node return target_node
# via load average # via load average
def findTargetNodeLoad(zk_conn, node_limit, dom_uuid): def findTargetNodeLoad(zk_conn, node_limit, dom_uuid):
least_load = 9999.0 least_load = 9999.0
@ -503,6 +523,7 @@ def findTargetNodeLoad(zk_conn, node_limit, dom_uuid):
return target_node return target_node
# via total vCPUs # via total vCPUs
def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid): def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid):
least_vcpus = 9999 least_vcpus = 9999
@ -518,6 +539,7 @@ def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid):
return target_node return target_node
# via total VMs # via total VMs
def findTargetNodeVMs(zk_conn, node_limit, dom_uuid): def findTargetNodeVMs(zk_conn, node_limit, dom_uuid):
least_vms = 9999 least_vms = 9999
@ -533,6 +555,7 @@ def findTargetNodeVMs(zk_conn, node_limit, dom_uuid):
return target_node return target_node
# Connect to the primary host and run a command # Connect to the primary host and run a command
def runRemoteCommand(node, command, become=False): def runRemoteCommand(node, command, become=False):
import paramiko import paramiko
@ -560,7 +583,6 @@ def runRemoteCommand(node, command, become=False):
ssh_client = paramiko.client.SSHClient() ssh_client = paramiko.client.SSHClient()
ssh_client.load_system_host_keys() ssh_client.load_system_host_keys()
ssh_client.set_missing_host_key_policy(DnssecPolicy()) ssh_client.set_missing_host_key_policy(DnssecPolicy())
#ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(node) ssh_client.connect(node)
stdin, stdout, stderr = ssh_client.exec_command(command) stdin, stdout, stderr = ssh_client.exec_command(command)
return stdout.read().decode('ascii').rstrip(), stderr.read().decode('ascii').rstrip() return stdout.read().decode('ascii').rstrip(), stderr.read().decode('ascii').rstrip()

View File

@ -20,23 +20,12 @@
# #
############################################################################### ###############################################################################
import os
import socket
import time
import uuid
import re import re
import tempfile
import subprocess
import difflib
import colorama
import click
import lxml.objectify
import configparser
import kazoo.client
import daemon_lib.ansiprint as ansiprint from kazoo.exceptions import NoNodeError
import daemon_lib.zkhandler as zkhandler import daemon_lib.zkhandler as zkhandler
import daemon_lib.common as common
# #
# Cluster search functions # Cluster search functions
@ -50,6 +39,7 @@ def getClusterNetworkList(zk_conn):
description_list.append(zkhandler.readdata(zk_conn, '/networks/{}'.format(vni))) description_list.append(zkhandler.readdata(zk_conn, '/networks/{}'.format(vni)))
return vni_list, description_list return vni_list, description_list
def searchClusterByVNI(zk_conn, vni): def searchClusterByVNI(zk_conn, vni):
try: try:
# Get the lists # Get the lists
@ -64,6 +54,7 @@ def searchClusterByVNI(zk_conn, vni):
return description return description
def searchClusterByDescription(zk_conn, description): def searchClusterByDescription(zk_conn, description):
try: try:
# Get the lists # Get the lists
@ -78,6 +69,7 @@ def searchClusterByDescription(zk_conn, description):
return vni return vni
def getNetworkVNI(zk_conn, network): def getNetworkVNI(zk_conn, network):
# Validate and obtain alternate passed value # Validate and obtain alternate passed value
if network.isdigit(): if network.isdigit():
@ -89,6 +81,7 @@ def getNetworkVNI(zk_conn, network):
return net_vni return net_vni
def getNetworkDescription(zk_conn, network): def getNetworkDescription(zk_conn, network):
# Validate and obtain alternate passed value # Validate and obtain alternate passed value
if network.isdigit(): if network.isdigit():
@ -100,16 +93,19 @@ def getNetworkDescription(zk_conn, network):
return net_description return net_description
def getNetworkDHCPLeases(zk_conn, vni): def getNetworkDHCPLeases(zk_conn, vni):
# Get a list of DHCP leases by listing the children of /networks/<vni>/dhcp4_leases # Get a list of DHCP leases by listing the children of /networks/<vni>/dhcp4_leases
dhcp4_leases = zkhandler.listchildren(zk_conn, '/networks/{}/dhcp4_leases'.format(vni)) dhcp4_leases = zkhandler.listchildren(zk_conn, '/networks/{}/dhcp4_leases'.format(vni))
return sorted(dhcp4_leases) return sorted(dhcp4_leases)
def getNetworkDHCPReservations(zk_conn, vni): def getNetworkDHCPReservations(zk_conn, vni):
# Get a list of DHCP reservations by listing the children of /networks/<vni>/dhcp4_reservations # Get a list of DHCP reservations by listing the children of /networks/<vni>/dhcp4_reservations
dhcp4_reservations = zkhandler.listchildren(zk_conn, '/networks/{}/dhcp4_reservations'.format(vni)) dhcp4_reservations = zkhandler.listchildren(zk_conn, '/networks/{}/dhcp4_reservations'.format(vni))
return sorted(dhcp4_reservations) return sorted(dhcp4_reservations)
def getNetworkACLs(zk_conn, vni, _direction): def getNetworkACLs(zk_conn, vni, _direction):
# Get the (sorted) list of active ACLs # Get the (sorted) list of active ACLs
if _direction == 'both': if _direction == 'both':
@ -131,6 +127,7 @@ def getNetworkACLs(zk_conn, vni, _direction):
return full_acl_list return full_acl_list
def getNetworkInformation(zk_conn, vni): def getNetworkInformation(zk_conn, vni):
description = zkhandler.readdata(zk_conn, '/networks/{}'.format(vni)) description = zkhandler.readdata(zk_conn, '/networks/{}'.format(vni))
nettype = zkhandler.readdata(zk_conn, '/networks/{}/nettype'.format(vni)) nettype = zkhandler.readdata(zk_conn, '/networks/{}/nettype'.format(vni))
@ -156,26 +153,27 @@ def getNetworkInformation(zk_conn, vni):
'network': ip6_network, 'network': ip6_network,
'gateway': ip6_gateway, 'gateway': ip6_gateway,
'dhcp_flag': dhcp6_flag, 'dhcp_flag': dhcp6_flag,
}, },
'ip4': { 'ip4': {
'network': ip4_network, 'network': ip4_network,
'gateway': ip4_gateway, 'gateway': ip4_gateway,
'dhcp_flag': dhcp4_flag, 'dhcp_flag': dhcp4_flag,
'dhcp_start': dhcp4_start, 'dhcp_start': dhcp4_start,
'dhcp_end': dhcp4_end 'dhcp_end': dhcp4_end
} }
} }
return network_information return network_information
def getDHCPLeaseInformation(zk_conn, vni, mac_address): def getDHCPLeaseInformation(zk_conn, vni, mac_address):
# Check whether this is a dynamic or static lease # Check whether this is a dynamic or static lease
try: try:
zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_leases/{}'.format(vni, mac_address)) zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_leases/{}'.format(vni, mac_address))
type_key = 'dhcp4_leases' type_key = 'dhcp4_leases'
except kazoo.exceptions.NoNodeError: except NoNodeError:
zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_reservations/{}'.format(vni, mac_address)) zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_reservations/{}'.format(vni, mac_address))
type_key = 'dhcp4_reservations' type_key = 'dhcp4_reservations'
hostname = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/hostname'.format(vni, type_key, mac_address)) hostname = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/hostname'.format(vni, type_key, mac_address))
ip4_address = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/ipaddr'.format(vni, type_key, mac_address)) ip4_address = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/ipaddr'.format(vni, type_key, mac_address))
if type_key == 'dhcp4_leases': if type_key == 'dhcp4_leases':
@ -192,6 +190,7 @@ def getDHCPLeaseInformation(zk_conn, vni, mac_address):
} }
return lease_information return lease_information
def getACLInformation(zk_conn, vni, direction, description): def getACLInformation(zk_conn, vni, direction, description):
order = zkhandler.readdata(zk_conn, '/networks/{}/firewall_rules/{}/{}/order'.format(vni, direction, description)) order = zkhandler.readdata(zk_conn, '/networks/{}/firewall_rules/{}/{}/order'.format(vni, direction, description))
rule = zkhandler.readdata(zk_conn, '/networks/{}/firewall_rules/{}/{}/rule'.format(vni, direction, description)) rule = zkhandler.readdata(zk_conn, '/networks/{}/firewall_rules/{}/{}/rule'.format(vni, direction, description))
@ -205,32 +204,35 @@ def getACLInformation(zk_conn, vni, direction, description):
} }
return acl_information return acl_information
def isValidMAC(macaddr): def isValidMAC(macaddr):
allowed = re.compile(r""" allowed = re.compile(r"""
( (
^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$ ^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$
) )
""", """,
re.VERBOSE|re.IGNORECASE) re.VERBOSE | re.IGNORECASE)
if allowed.match(macaddr): if allowed.match(macaddr):
return True return True
else: else:
return False return False
def isValidIP(ipaddr): def isValidIP(ipaddr):
ip4_blocks = str(ipaddr).split(".") ip4_blocks = str(ipaddr).split(".")
if len(ip4_blocks) == 4: if len(ip4_blocks) == 4:
for block in ip4_blocks: for block in ip4_blocks:
# Check if number is digit, if not checked before calling this function # Check if number is digit, if not checked before calling this function
if not block.isdigit(): if not block.isdigit():
return False return False
tmp = int(block) tmp = int(block)
if 0 > tmp > 255: if 0 > tmp > 255:
return False return False
return True return True
return False return False
# #
# Direct functions # Direct functions
# #
@ -238,7 +240,7 @@ def add_network(zk_conn, vni, description, nettype,
domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway,
dhcp4_flag, dhcp4_start, dhcp4_end): dhcp4_flag, dhcp4_start, dhcp4_end):
# Ensure start and end DHCP ranges are set if the flag is set # Ensure start and end DHCP ranges are set if the flag is set
if dhcp4_flag and ( not dhcp4_start or not dhcp4_end ): if dhcp4_flag and (not dhcp4_start or not dhcp4_end):
return False, 'ERROR: DHCPv4 start and end addresses are required for a DHCPv4-enabled network.' return False, 'ERROR: DHCPv4 start and end addresses are required for a DHCPv4-enabled network.'
# Check if a network with this VNI or description already exists # Check if a network with this VNI or description already exists
@ -284,6 +286,7 @@ def add_network(zk_conn, vni, description, nettype,
return True, 'Network "{}" added successfully!'.format(description) return True, 'Network "{}" added successfully!'.format(description)
def modify_network(zk_conn, vni, description=None, domain=None, name_servers=None, def modify_network(zk_conn, vni, description=None, domain=None, name_servers=None,
ip4_network=None, ip4_gateway=None, ip6_network=None, ip6_gateway=None, ip4_network=None, ip4_gateway=None, ip6_network=None, ip6_gateway=None,
dhcp4_flag=None, dhcp4_start=None, dhcp4_end=None): dhcp4_flag=None, dhcp4_start=None, dhcp4_end=None):
@ -325,6 +328,7 @@ def modify_network(zk_conn, vni, description=None, domain=None, name_servers=Non
return True, 'Network "{}" modified successfully!'.format(vni) return True, 'Network "{}" modified successfully!'.format(vni)
def remove_network(zk_conn, network): def remove_network(zk_conn, network):
# Validate and obtain alternate passed value # Validate and obtain alternate passed value
vni = getNetworkVNI(zk_conn, network) vni = getNetworkVNI(zk_conn, network)
@ -368,6 +372,7 @@ def add_dhcp_reservation(zk_conn, network, ipaddress, macaddress, hostname):
return True, 'DHCP reservation "{}" added successfully!'.format(macaddress) return True, 'DHCP reservation "{}" added successfully!'.format(macaddress)
def remove_dhcp_reservation(zk_conn, network, reservation): def remove_dhcp_reservation(zk_conn, network, reservation):
# Validate and obtain standard passed value # Validate and obtain standard passed value
net_vni = getNetworkVNI(zk_conn, network) net_vni = getNetworkVNI(zk_conn, network)
@ -402,11 +407,12 @@ def remove_dhcp_reservation(zk_conn, network, reservation):
# Remove the entry from zookeeper # Remove the entry from zookeeper
try: try:
zkhandler.deletekey(zk_conn, '/networks/{}/dhcp4_{}/{}'.format(net_vni, lease_type_zk, match_description)) zkhandler.deletekey(zk_conn, '/networks/{}/dhcp4_{}/{}'.format(net_vni, lease_type_zk, match_description))
except: except Exception:
return False, 'ERROR: Failed to write to Zookeeper!' return False, 'ERROR: Failed to write to Zookeeper!'
return True, 'DHCP {} "{}" removed successfully!'.format(lease_type_human, match_description) return True, 'DHCP {} "{}" removed successfully!'.format(lease_type_human, match_description)
def add_acl(zk_conn, network, direction, description, rule, order): def add_acl(zk_conn, network, direction, description, rule, order):
# Validate and obtain standard passed value # Validate and obtain standard passed value
net_vni = getNetworkVNI(zk_conn, network) net_vni = getNetworkVNI(zk_conn, network)
@ -470,6 +476,7 @@ def add_acl(zk_conn, network, direction, description, rule, order):
return True, 'Firewall rule "{}" added successfully!'.format(description) return True, 'Firewall rule "{}" added successfully!'.format(description)
def remove_acl(zk_conn, network, description): def remove_acl(zk_conn, network, description):
# Validate and obtain standard passed value # Validate and obtain standard passed value
net_vni = getNetworkVNI(zk_conn, network) net_vni = getNetworkVNI(zk_conn, network)
@ -510,6 +517,7 @@ def remove_acl(zk_conn, network, description):
return True, 'Firewall rule "{}" removed successfully!'.format(match_description) return True, 'Firewall rule "{}" removed successfully!'.format(match_description)
def get_info(zk_conn, network): def get_info(zk_conn, network):
# Validate and obtain alternate passed value # Validate and obtain alternate passed value
net_vni = getNetworkVNI(zk_conn, network) net_vni = getNetworkVNI(zk_conn, network)
@ -522,6 +530,7 @@ def get_info(zk_conn, network):
return True, network_information return True, network_information
def get_list(zk_conn, limit, is_fuzzy=True): def get_list(zk_conn, limit, is_fuzzy=True):
net_list = [] net_list = []
full_net_list = zkhandler.listchildren(zk_conn, '/networks') full_net_list = zkhandler.listchildren(zk_conn, '/networks')
@ -542,9 +551,9 @@ def get_list(zk_conn, limit, is_fuzzy=True):
else: else:
net_list.append(getNetworkInformation(zk_conn, net)) net_list.append(getNetworkInformation(zk_conn, net))
#output_string = formatNetworkList(zk_conn, net_list)
return True, net_list return True, net_list
def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True): def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
# Validate and obtain alternate passed value # Validate and obtain alternate passed value
net_vni = getNetworkVNI(zk_conn, network) net_vni = getNetworkVNI(zk_conn, network)
@ -555,11 +564,9 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
if only_static: if only_static:
full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni) full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
reservations = True
else: else:
full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni) full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
full_dhcp_list += getNetworkDHCPLeases(zk_conn, net_vni) full_dhcp_list += getNetworkDHCPLeases(zk_conn, net_vni)
reservations = False
if limit: if limit:
try: try:
@ -567,9 +574,9 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
limit = '^' + limit + '$' limit = '^' + limit + '$'
# Implcitly assume fuzzy limits # Implcitly assume fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '.*' + limit limit = '.*' + limit
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '.*' limit = limit + '.*'
except Exception as e: except Exception as e:
return False, 'Regex Error: {}'.format(e) return False, 'Regex Error: {}'.format(e)
@ -589,6 +596,7 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
return True, dhcp_list return True, dhcp_list
def get_list_acl(zk_conn, network, limit, direction, is_fuzzy=True): def get_list_acl(zk_conn, network, limit, direction, is_fuzzy=True):
# Validate and obtain alternate passed value # Validate and obtain alternate passed value
net_vni = getNetworkVNI(zk_conn, network) net_vni = getNetworkVNI(zk_conn, network)
@ -612,9 +620,9 @@ def get_list_acl(zk_conn, network, limit, direction, is_fuzzy=True):
limit = '^' + limit + '$' limit = '^' + limit + '$'
# Implcitly assume fuzzy limits # Implcitly assume fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '.*' + limit limit = '.*' + limit
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '.*' limit = limit + '.*'
except Exception as e: except Exception as e:
return False, 'Regex Error: {}'.format(e) return False, 'Regex Error: {}'.format(e)
@ -630,326 +638,4 @@ def get_list_acl(zk_conn, network, limit, direction, is_fuzzy=True):
if valid_acl: if valid_acl:
acl_list.append(acl) acl_list.append(acl)
#output_string = formatACLList(zk_conn, net_vni, direction, acl_list)
return True, acl_list return True, acl_list
# CLI-only functions
def getOutputColours(network_information):
if network_information['ip6']['network'] != "None":
v6_flag_colour = ansiprint.green()
else:
v6_flag_colour = ansiprint.blue()
if network_information['ip4']['network'] != "None":
v4_flag_colour = ansiprint.green()
else:
v4_flag_colour = ansiprint.blue()
if network_information['ip6']['dhcp_flag'] == "True":
dhcp6_flag_colour = ansiprint.green()
else:
dhcp6_flag_colour = ansiprint.blue()
if network_information['ip4']['dhcp_flag'] == "True":
dhcp4_flag_colour = ansiprint.green()
else:
dhcp4_flag_colour = ansiprint.blue()
return v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour
def format_info(network_information, long_output):
if not network_information:
click.echo("No network found")
return
v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour = getOutputColours(network_information)
# Format a nice output: do this line-by-line then concat the elements at the end
ainformation = []
ainformation.append('{}Virtual network information:{}'.format(ansiprint.bold(), ansiprint.end()))
ainformation.append('')
# Basic information
ainformation.append('{}VNI:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['vni']))
ainformation.append('{}Type:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['type']))
ainformation.append('{}Description:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['description']))
if network_information['type'] == 'managed':
ainformation.append('{}Domain:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['domain']))
ainformation.append('{}DNS Servers:{} {}'.format(ansiprint.purple(), ansiprint.end(), ', '.join(network_information['name_servers'])))
if network_information['ip6']['network'] != "None":
ainformation.append('')
ainformation.append('{}IPv6 network:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['ip6']['network']))
ainformation.append('{}IPv6 gateway:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['ip6']['gateway']))
ainformation.append('{}DHCPv6 enabled:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), dhcp6_flag_colour, network_information['ip6']['dhcp_flag'], ansiprint.end()))
if network_information['ip4']['network'] != "None":
ainformation.append('')
ainformation.append('{}IPv4 network:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['ip4']['network']))
ainformation.append('{}IPv4 gateway:{} {}'.format(ansiprint.purple(), ansiprint.end(), network_information['ip4']['gateway']))
ainformation.append('{}DHCPv4 enabled:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), dhcp4_flag_colour, network_information['ip4']['dhcp_flag'], ansiprint.end()))
if network_information['ip4']['dhcp_flag'] == "True":
ainformation.append('{}DHCPv4 range:{} {} - {}'.format(ansiprint.purple(), ansiprint.end(), network_information['ip4']['dhcp_start'], network_information['ip4']['dhcp_end']))
if long_output:
dhcp4_reservations_list = getNetworkDHCPReservations(zk_conn, vni)
if dhcp4_reservations_list:
ainformation.append('')
ainformation.append('{}Client DHCPv4 reservations:{}'.format(ansiprint.bold(), ansiprint.end()))
ainformation.append('')
# Only show static reservations in the detailed information
dhcp4_reservations_string = formatDHCPLeaseList(zk_conn, vni, dhcp4_reservations_list, reservations=True)
for line in dhcp4_reservations_string.split('\n'):
ainformation.append(line)
firewall_rules = zkhandler.listchildren(zk_conn, '/networks/{}/firewall_rules'.format(vni))
if firewall_rules:
ainformation.append('')
ainformation.append('{}Network firewall rules:{}'.format(ansiprint.bold(), ansiprint.end()))
ainformation.append('')
formatted_firewall_rules = get_list_firewall_rules(zk_conn, vni)
# Join it all together
click.echo('\n'.join(ainformation))
def format_list(network_list):
    """
    Print a formatted, sorted table of PVC networks to the terminal.

    network_list is a list of network information dicts as returned by the
    network get_list function; an empty/falsy list prints a notice instead.
    Returns None (output goes straight to the terminal via click.echo).
    """
    if not network_list:
        click.echo("No network found")
        return

    network_list_output = []

    # Determine optimal column widths; the minimums cover the header labels,
    # and the VNI/description/domain columns grow to fit the widest value.
    net_vni_length = max([5] + [len(str(net['vni'])) + 1 for net in network_list])
    net_description_length = max([12] + [len(net['description']) + 1 for net in network_list])
    net_domain_length = max([6] + [len(net['domain']) + 1 for net in network_list])
    # Fixed-width columns (flag values never exceed the header width)
    net_nettype_length = 8
    net_v6_flag_length = 6
    net_dhcp6_flag_length = 7
    net_v4_flag_length = 6
    net_dhcp4_flag_length = 7

    # Header row (bolded)
    network_list_output.append('{bold}\
{net_vni: <{net_vni_length}} \
{net_description: <{net_description_length}} \
{net_nettype: <{net_nettype_length}} \
{net_domain: <{net_domain_length}} \
{net_v6_flag: <{net_v6_flag_length}} \
{net_dhcp6_flag: <{net_dhcp6_flag_length}} \
{net_v4_flag: <{net_v4_flag_length}} \
{net_dhcp4_flag: <{net_dhcp4_flag_length}} \
{end_bold}'.format(
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        net_vni_length=net_vni_length,
        net_description_length=net_description_length,
        net_nettype_length=net_nettype_length,
        net_domain_length=net_domain_length,
        net_v6_flag_length=net_v6_flag_length,
        net_dhcp6_flag_length=net_dhcp6_flag_length,
        net_v4_flag_length=net_v4_flag_length,
        net_dhcp4_flag_length=net_dhcp4_flag_length,
        net_vni='VNI',
        net_description='Description',
        net_nettype='Type',
        net_domain='Domain',
        net_v6_flag='IPv6',
        net_dhcp6_flag='DHCPv6',
        net_v4_flag='IPv4',
        net_dhcp4_flag='DHCPv4',
    ))

    # Data rows, one per network
    for net in network_list:
        v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour = getOutputColours(net)
        # The backing store represents "unset" as the literal string "None"
        v4_flag = 'True' if net['ip4']['network'] != "None" else 'False'
        v6_flag = 'True' if net['ip6']['network'] != "None" else 'False'
        # NOTE(review): the original also built a dhcp4_range string here, but
        # it was never referenced by the row template, so it is omitted.
        network_list_output.append(
            '{bold}\
{net_vni: <{net_vni_length}} \
{net_description: <{net_description_length}} \
{net_nettype: <{net_nettype_length}} \
{net_domain: <{net_domain_length}} \
{v6_flag_colour}{net_v6_flag: <{net_v6_flag_length}}{colour_off} \
{dhcp6_flag_colour}{net_dhcp6_flag: <{net_dhcp6_flag_length}}{colour_off} \
{v4_flag_colour}{net_v4_flag: <{net_v4_flag_length}}{colour_off} \
{dhcp4_flag_colour}{net_dhcp4_flag: <{net_dhcp4_flag_length}}{colour_off} \
{end_bold}'.format(
                bold='',
                end_bold='',
                net_vni_length=net_vni_length,
                net_description_length=net_description_length,
                net_nettype_length=net_nettype_length,
                net_domain_length=net_domain_length,
                net_v6_flag_length=net_v6_flag_length,
                net_dhcp6_flag_length=net_dhcp6_flag_length,
                net_v4_flag_length=net_v4_flag_length,
                net_dhcp4_flag_length=net_dhcp4_flag_length,
                net_vni=net['vni'],
                net_description=net['description'],
                net_nettype=net['type'],
                net_domain=net['domain'],
                net_v6_flag=v6_flag,
                v6_flag_colour=v6_flag_colour,
                net_dhcp6_flag=net['ip6']['dhcp_flag'],
                dhcp6_flag_colour=dhcp6_flag_colour,
                net_v4_flag=v4_flag,
                v4_flag_colour=v4_flag_colour,
                net_dhcp4_flag=net['ip4']['dhcp_flag'],
                dhcp4_flag_colour=dhcp4_flag_colour,
                colour_off=ansiprint.end()
            )
        )

    # The header sorts first because it begins with the bold escape sequence
    click.echo('\n'.join(sorted(network_list_output)))
def format_list_dhcp(dhcp_lease_list):
    """
    Print a formatted, sorted table of DHCP leases to the terminal.

    dhcp_lease_list is a list of lease dicts with 'hostname', 'ip4_address',
    'mac_address', and 'timestamp' keys. Returns None (output is emitted
    directly via click.echo).
    """
    dhcp_lease_list_output = []

    # Determine optimal column widths; the minimums cover the header labels
    lease_hostname_length = 9
    lease_ip4_address_length = 11
    lease_mac_address_length = 13
    lease_timestamp_length = 13
    for dhcp_lease_information in dhcp_lease_list:
        # hostname column
        _lease_hostname_length = len(dhcp_lease_information['hostname']) + 1
        if _lease_hostname_length > lease_hostname_length:
            lease_hostname_length = _lease_hostname_length
        # ip4_address column
        _lease_ip4_address_length = len(dhcp_lease_information['ip4_address']) + 1
        if _lease_ip4_address_length > lease_ip4_address_length:
            lease_ip4_address_length = _lease_ip4_address_length
        # mac_address column
        _lease_mac_address_length = len(dhcp_lease_information['mac_address']) + 1
        if _lease_mac_address_length > lease_mac_address_length:
            lease_mac_address_length = _lease_mac_address_length
        # timestamp column
        # BUGFIX: previously this column was never measured and the data rows
        # hardcoded a width of 12 while the header used the variable (13),
        # misaligning every row against the header.
        _lease_timestamp_length = len(str(dhcp_lease_information['timestamp'])) + 1
        if _lease_timestamp_length > lease_timestamp_length:
            lease_timestamp_length = _lease_timestamp_length

    # Format the string (header)
    dhcp_lease_list_output.append('{bold}\
{lease_hostname: <{lease_hostname_length}} \
{lease_ip4_address: <{lease_ip4_address_length}} \
{lease_mac_address: <{lease_mac_address_length}} \
{lease_timestamp: <{lease_timestamp_length}} \
{end_bold}'.format(
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        lease_hostname_length=lease_hostname_length,
        lease_ip4_address_length=lease_ip4_address_length,
        lease_mac_address_length=lease_mac_address_length,
        lease_timestamp_length=lease_timestamp_length,
        lease_hostname='Hostname',
        lease_ip4_address='IP Address',
        lease_mac_address='MAC Address',
        lease_timestamp='Timestamp'
    ))

    # Format the string (elements)
    for dhcp_lease_information in dhcp_lease_list:
        dhcp_lease_list_output.append('{bold}\
{lease_hostname: <{lease_hostname_length}} \
{lease_ip4_address: <{lease_ip4_address_length}} \
{lease_mac_address: <{lease_mac_address_length}} \
{lease_timestamp: <{lease_timestamp_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            lease_hostname_length=lease_hostname_length,
            lease_ip4_address_length=lease_ip4_address_length,
            lease_mac_address_length=lease_mac_address_length,
            # Use the measured width so rows line up with the header
            lease_timestamp_length=lease_timestamp_length,
            lease_hostname=dhcp_lease_information['hostname'],
            lease_ip4_address=dhcp_lease_information['ip4_address'],
            lease_mac_address=dhcp_lease_information['mac_address'],
            lease_timestamp=dhcp_lease_information['timestamp']
        ))

    # The header sorts first because it begins with the bold escape sequence
    click.echo('\n'.join(sorted(dhcp_lease_list_output)))
def format_list_acl(acl_list):
    """
    Print a formatted, sorted table of network firewall ACL rules.

    acl_list is a list of ACL dicts with 'direction', 'order', 'description',
    and 'rule' keys. Returns None (output is emitted via click.echo).
    """
    acl_list_output = []

    # Determine optimal column widths; the minimums cover the header labels,
    # and the variable columns grow to fit the widest value present.
    acl_direction_length = 10
    acl_order_length = max([6] + [len(str(acl['order'])) + 1 for acl in acl_list])
    acl_description_length = max([12] + [len(acl['description']) + 1 for acl in acl_list])
    acl_rule_length = max([5] + [len(acl['rule']) + 1 for acl in acl_list])

    # Header row (bolded)
    acl_list_output.append('{bold}\
{acl_direction: <{acl_direction_length}} \
{acl_order: <{acl_order_length}} \
{acl_description: <{acl_description_length}} \
{acl_rule: <{acl_rule_length}} \
{end_bold}'.format(
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        acl_direction_length=acl_direction_length,
        acl_order_length=acl_order_length,
        acl_description_length=acl_description_length,
        acl_rule_length=acl_rule_length,
        acl_direction='Direction',
        acl_order='Order',
        acl_description='Description',
        acl_rule='Rule',
    ))

    # Data rows, one per ACL entry
    for acl in acl_list:
        acl_list_output.append('{bold}\
{acl_direction: <{acl_direction_length}} \
{acl_order: <{acl_order_length}} \
{acl_description: <{acl_description_length}} \
{acl_rule: <{acl_rule_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            acl_direction_length=acl_direction_length,
            acl_order_length=acl_order_length,
            acl_description_length=acl_description_length,
            acl_rule_length=acl_rule_length,
            acl_direction=acl['direction'],
            acl_order=acl['order'],
            acl_description=acl['description'],
            acl_rule=acl['rule'],
        ))

    # The header sorts first because it begins with the bold escape sequence
    click.echo('\n'.join(sorted(acl_list_output)))

View File

@ -20,24 +20,12 @@
# #
############################################################################### ###############################################################################
import os
import socket
import time import time
import uuid
import re import re
import tempfile
import subprocess
import difflib
import colorama
import click
import lxml.objectify
import configparser
import kazoo.client
import daemon_lib.ansiprint as ansiprint
import daemon_lib.zkhandler as zkhandler import daemon_lib.zkhandler as zkhandler
import daemon_lib.common as common import daemon_lib.common as common
import daemon_lib.vm as pvc_vm
def getNodeInformation(zk_conn, node_name): def getNodeInformation(zk_conn, node_name):
""" """
@ -88,6 +76,7 @@ def getNodeInformation(zk_conn, node_name):
} }
return node_information return node_information
# #
# Direct Functions # Direct Functions
# #
@ -118,6 +107,7 @@ def secondary_node(zk_conn, node):
return True, retmsg return True, retmsg
def primary_node(zk_conn, node): def primary_node(zk_conn, node):
# Verify node is valid # Verify node is valid
if not common.verifyNode(zk_conn, node): if not common.verifyNode(zk_conn, node):
@ -145,6 +135,7 @@ def primary_node(zk_conn, node):
return True, retmsg return True, retmsg
def flush_node(zk_conn, node, wait=False): def flush_node(zk_conn, node, wait=False):
# Verify node is valid # Verify node is valid
if not common.verifyNode(zk_conn, node): if not common.verifyNode(zk_conn, node):
@ -164,13 +155,14 @@ def flush_node(zk_conn, node, wait=False):
return True, retmsg return True, retmsg
def ready_node(zk_conn, node, wait=False): def ready_node(zk_conn, node, wait=False):
# Verify node is valid # Verify node is valid
if not common.verifyNode(zk_conn, node): if not common.verifyNode(zk_conn, node):
return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node) return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)
retmsg = 'Restoring hypervisor {} to active service.'.format(node) retmsg = 'Restoring hypervisor {} to active service.'.format(node)
# Add the new domain to Zookeeper # Add the new domain to Zookeeper
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
'/nodes/{}/domainstate'.format(node): 'unflush' '/nodes/{}/domainstate'.format(node): 'unflush'
@ -183,6 +175,7 @@ def ready_node(zk_conn, node, wait=False):
return True, retmsg return True, retmsg
def get_info(zk_conn, node): def get_info(zk_conn, node):
# Verify node is valid # Verify node is valid
if not common.verifyNode(zk_conn, node): if not common.verifyNode(zk_conn, node):
@ -195,6 +188,7 @@ def get_info(zk_conn, node):
return True, node_information return True, node_information
def get_list(zk_conn, limit, daemon_state=None, coordinator_state=None, domain_state=None, is_fuzzy=True): def get_list(zk_conn, limit, daemon_state=None, coordinator_state=None, domain_state=None, is_fuzzy=True):
node_list = [] node_list = []
full_node_list = zkhandler.listchildren(zk_conn, '/nodes') full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
@ -227,215 +221,3 @@ def get_list(zk_conn, limit, daemon_state=None, coordinator_state=None, domain_s
node_list = limited_node_list node_list = limited_node_list
return True, node_list return True, node_list
#
# CLI-specific functions
#
def getOutputColours(node_information):
if node_information['daemon_state'] == 'run':
daemon_state_colour = ansiprint.green()
elif node_information['daemon_state'] == 'stop':
daemon_state_colour = ansiprint.red()
elif node_information['daemon_state'] == 'shutdown':
daemon_state_colour = ansiprint.yellow()
elif node_information['daemon_state'] == 'init':
daemon_state_colour = ansiprint.yellow()
elif node_information['daemon_state'] == 'dead':
daemon_state_colour = ansiprint.red() + ansiprint.bold()
else:
daemon_state_colour = ansiprint.blue()
if node_information['coordinator_state'] == 'primary':
coordinator_state_colour = ansiprint.green()
elif node_information['coordinator_state'] == 'secondary':
coordinator_state_colour = ansiprint.blue()
else:
coordinator_state_colour = ansiprint.cyan()
if node_information['domain_state'] == 'ready':
domain_state_colour = ansiprint.green()
else:
domain_state_colour = ansiprint.blue()
return daemon_state_colour, coordinator_state_colour, domain_state_colour
def format_info(node_information, long_output):
daemon_state_colour, coordinator_state_colour, domain_state_colour = getOutputColours(node_information)
# Format a nice output; do this line-by-line then concat the elements at the end
ainformation = []
# Basic information
ainformation.append('{}Name:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
ainformation.append('{}Daemon State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
ainformation.append('{}Coordinator State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
ainformation.append('{}Domain State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
ainformation.append('{}Active VM Count:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
if long_output:
ainformation.append('')
ainformation.append('{}Architecture:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
ainformation.append('{}Operating System:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
ainformation.append('{}Kernel Version:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
ainformation.append('')
ainformation.append('{}Host CPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
ainformation.append('{}Load:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
ainformation.append('{}Total RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
ainformation.append('{}Used RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
ainformation.append('{}Free RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
ainformation.append('{}Allocated RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['allocated']))
ainformation.append('{}Provisioned RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['provisioned']))
# Join it all together
information = '\n'.join(ainformation)
click.echo(information)
click.echo('')
def format_list(node_list):
node_list_output = []
# Determine optimal column widths
node_name_length = 5
daemon_state_length = 7
coordinator_state_length = 12
domain_state_length = 8
domains_count_length = 4
cpu_count_length = 6
load_length = 5
mem_total_length = 6
mem_used_length = 5
mem_free_length = 5
mem_alloc_length = 4
mem_prov_length = 4
for node_information in node_list:
# node_name column
_node_name_length = len(node_information['name']) + 1
if _node_name_length > node_name_length:
node_name_length = _node_name_length
# daemon_state column
_daemon_state_length = len(node_information['daemon_state']) + 1
if _daemon_state_length > daemon_state_length:
daemon_state_length = _daemon_state_length
# coordinator_state column
_coordinator_state_length = len(node_information['coordinator_state']) + 1
if _coordinator_state_length > coordinator_state_length:
coordinator_state_length = _coordinator_state_length
# domain_state column
_domain_state_length = len(node_information['domain_state']) + 1
if _domain_state_length > domain_state_length:
domain_state_length = _domain_state_length
# domains_count column
_domains_count_length = len(str(node_information['domains_count'])) + 1
if _domains_count_length > domains_count_length:
domains_count_length = _domains_count_length
# cpu_count column
_cpu_count_length = len(str(node_information['cpu_count'])) + 1
if _cpu_count_length > cpu_count_length:
cpu_count_length = _cpu_count_length
# load column
_load_length = len(str(node_information['load'])) + 1
if _load_length > load_length:
load_length = _load_length
# mem_total column
_mem_total_length = len(str(node_information['memory']['total'])) + 1
if _mem_total_length > mem_total_length:
mem_total_length = _mem_total_length
# mem_used column
_mem_used_length = len(str(node_information['memory']['used'])) + 1
if _mem_used_length > mem_used_length:
mem_used_length = _mem_used_length
# mem_free column
_mem_free_length = len(str(node_information['memory']['free'])) + 1
if _mem_free_length > mem_free_length:
mem_free_length = _mem_free_length
# mem_alloc column
_mem_alloc_length = len(str(node_information['memory']['allocated'])) + 1
if _mem_alloc_length > mem_alloc_length:
mem_alloc_length = _mem_alloc_length
# mem_prov column
_mem_prov_length = len(str(node_information['memory']['provisioned'])) + 1
if _mem_prov_length > mem_prov_length:
mem_prov_length = _mem_prov_length
# Format the string (header)
node_list_output.append(
'{bold}{node_name: <{node_name_length}} \
St: {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
Res: {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
node_name_length=node_name_length,
daemon_state_length=daemon_state_length,
coordinator_state_length=coordinator_state_length,
domain_state_length=domain_state_length,
domains_count_length=domains_count_length,
cpu_count_length=cpu_count_length,
load_length=load_length,
mem_total_length=mem_total_length,
mem_used_length=mem_used_length,
mem_free_length=mem_free_length,
mem_alloc_length=mem_alloc_length,
mem_prov_length=mem_prov_length,
bold=ansiprint.bold(),
end_bold=ansiprint.end(),
daemon_state_colour='',
coordinator_state_colour='',
domain_state_colour='',
end_colour='',
node_name='Name',
node_daemon_state='Daemon',
node_coordinator_state='Coordinator',
node_domain_state='Domain',
node_domains_count='VMs',
node_cpu_count='vCPUs',
node_load='Load',
node_mem_total='Total',
node_mem_used='Used',
node_mem_free='Free',
node_mem_allocated='VMs Run',
node_mem_provisioned='VMs Total'
)
)
# Format the string (elements)
for node_information in node_list:
daemon_state_colour, coordinator_state_colour, domain_state_colour = getOutputColours(node_information)
node_list_output.append(
'{bold}{node_name: <{node_name_length}} \
{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
node_name_length=node_name_length,
daemon_state_length=daemon_state_length,
coordinator_state_length=coordinator_state_length,
domain_state_length=domain_state_length,
domains_count_length=domains_count_length,
cpu_count_length=cpu_count_length,
load_length=load_length,
mem_total_length=mem_total_length,
mem_used_length=mem_used_length,
mem_free_length=mem_free_length,
mem_alloc_length=mem_alloc_length,
mem_prov_length=mem_prov_length,
bold='',
end_bold='',
daemon_state_colour=daemon_state_colour,
coordinator_state_colour=coordinator_state_colour,
domain_state_colour=domain_state_colour,
end_colour=ansiprint.end(),
node_name=node_information['name'],
node_daemon_state=node_information['daemon_state'],
node_coordinator_state=node_information['coordinator_state'],
node_domain_state=node_information['domain_state'],
node_domains_count=node_information['domains_count'],
node_cpu_count=node_information['vcpu']['allocated'],
node_load=node_information['load'],
node_mem_total=node_information['memory']['total'],
node_mem_used=node_information['memory']['used'],
node_mem_free=node_information['memory']['free'],
node_mem_allocated=node_information['memory']['allocated'],
node_mem_provisioned=node_information['memory']['provisioned']
)
)
click.echo('\n'.join(sorted(node_list_output)))

View File

@ -20,27 +20,16 @@
# #
############################################################################### ###############################################################################
import os
import socket
import time import time
import uuid
import re import re
import subprocess
import difflib
import colorama
import click
import lxml.objectify import lxml.objectify
import configparser
import kazoo.client
from collections import deque
import daemon_lib.ansiprint as ansiprint
import daemon_lib.zkhandler as zkhandler import daemon_lib.zkhandler as zkhandler
import daemon_lib.common as common import daemon_lib.common as common
import daemon_lib.ceph as ceph import daemon_lib.ceph as ceph
# #
# Cluster search functions # Cluster search functions
# #
@ -53,6 +42,7 @@ def getClusterDomainList(zk_conn):
name_list.append(zkhandler.readdata(zk_conn, '/domains/%s' % uuid)) name_list.append(zkhandler.readdata(zk_conn, '/domains/%s' % uuid))
return uuid_list, name_list return uuid_list, name_list
def searchClusterByUUID(zk_conn, uuid): def searchClusterByUUID(zk_conn, uuid):
try: try:
# Get the lists # Get the lists
@ -67,6 +57,7 @@ def searchClusterByUUID(zk_conn, uuid):
return name return name
def searchClusterByName(zk_conn, name): def searchClusterByName(zk_conn, name):
try: try:
# Get the lists # Get the lists
@ -81,6 +72,7 @@ def searchClusterByName(zk_conn, name):
return uuid return uuid
def getDomainUUID(zk_conn, domain): def getDomainUUID(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
if common.validateUUID(domain): if common.validateUUID(domain):
@ -92,6 +84,7 @@ def getDomainUUID(zk_conn, domain):
return dom_uuid return dom_uuid
def getDomainName(zk_conn, domain): def getDomainName(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
if common.validateUUID(domain): if common.validateUUID(domain):
@ -103,6 +96,7 @@ def getDomainName(zk_conn, domain):
return dom_name return dom_name
# #
# Direct functions # Direct functions
# #
@ -118,6 +112,7 @@ def is_migrated(zk_conn, domain):
else: else:
return False return False
def flush_locks(zk_conn, domain): def flush_locks(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -145,7 +140,7 @@ def flush_locks(zk_conn, domain):
else: else:
message = 'ERROR: Failed to flush locks on VM "{}"; check node logs for details.'.format(domain) message = 'ERROR: Failed to flush locks on VM "{}"; check node logs for details.'.format(domain)
success = False success = False
except: except Exception:
message = 'ERROR: Command ignored by node.' message = 'ERROR: Command ignored by node.'
success = False success = False
@ -157,11 +152,12 @@ def flush_locks(zk_conn, domain):
return success, message return success, message
def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node_autostart, migration_method=None, profile=None, initial_state='stop'): def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node_autostart, migration_method=None, profile=None, initial_state='stop'):
# Parse the XML data # Parse the XML data
try: try:
parsed_xml = lxml.objectify.fromstring(config_data) parsed_xml = lxml.objectify.fromstring(config_data)
except: except Exception:
return False, 'ERROR: Failed to parse XML data.' return False, 'ERROR: Failed to parse XML data.'
dom_uuid = parsed_xml.uuid.text dom_uuid = parsed_xml.uuid.text
dom_name = parsed_xml.name.text dom_name = parsed_xml.name.text
@ -216,6 +212,7 @@ def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node
return True, 'Added new VM with Name "{}" and UUID "{}" to database.'.format(dom_name, dom_uuid) return True, 'Added new VM with Name "{}" and UUID "{}" to database.'.format(dom_name, dom_uuid)
def modify_vm_metadata(zk_conn, domain, node_limit, node_selector, node_autostart, provisioner_profile, migration_method): def modify_vm_metadata(zk_conn, domain, node_limit, node_selector, node_autostart, provisioner_profile, migration_method):
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
if not dom_uuid: if not dom_uuid:
@ -248,6 +245,7 @@ def modify_vm_metadata(zk_conn, domain, node_limit, node_selector, node_autostar
return True, 'Successfully modified PVC metadata of VM "{}".'.format(domain) return True, 'Successfully modified PVC metadata of VM "{}".'.format(domain)
def modify_vm(zk_conn, domain, restart, new_vm_config): def modify_vm(zk_conn, domain, restart, new_vm_config):
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
if not dom_uuid: if not dom_uuid:
@ -257,7 +255,7 @@ def modify_vm(zk_conn, domain, restart, new_vm_config):
# Parse and valiate the XML # Parse and valiate the XML
try: try:
parsed_xml = lxml.objectify.fromstring(new_vm_config) parsed_xml = lxml.objectify.fromstring(new_vm_config)
except: except Exception:
return False, 'ERROR: Failed to parse XML data.' return False, 'ERROR: Failed to parse XML data.'
# Obtain the RBD disk list using the common functions # Obtain the RBD disk list using the common functions
@ -284,11 +282,12 @@ def modify_vm(zk_conn, domain, restart, new_vm_config):
if restart: if restart:
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'restart' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'restart'})
lock.release() lock.release()
return True, '' return True, ''
def dump_vm(zk_conn, domain): def dump_vm(zk_conn, domain):
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
if not dom_uuid: if not dom_uuid:
@ -299,6 +298,7 @@ def dump_vm(zk_conn, domain):
return True, vm_xml return True, vm_xml
def undefine_vm(zk_conn, domain): def undefine_vm(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -311,7 +311,7 @@ def undefine_vm(zk_conn, domain):
# Set the domain into stop mode # Set the domain into stop mode
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'stop' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
lock.release() lock.release()
# Wait for 2 seconds to allow state to flow to all nodes # Wait for 2 seconds to allow state to flow to all nodes
@ -326,6 +326,7 @@ def undefine_vm(zk_conn, domain):
return True, 'Undefined VM "{}" from the cluster.'.format(domain) return True, 'Undefined VM "{}" from the cluster.'.format(domain)
def remove_vm(zk_conn, domain): def remove_vm(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -340,7 +341,7 @@ def remove_vm(zk_conn, domain):
# Set the domain into stop mode # Set the domain into stop mode
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'stop' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
lock.release() lock.release()
# Wait for 2 seconds to allow state to flow to all nodes # Wait for 2 seconds to allow state to flow to all nodes
@ -365,6 +366,7 @@ def remove_vm(zk_conn, domain):
return True, 'Removed VM "{}" and disks from the cluster.'.format(domain) return True, 'Removed VM "{}" and disks from the cluster.'.format(domain)
def start_vm(zk_conn, domain): def start_vm(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -374,11 +376,12 @@ def start_vm(zk_conn, domain):
# Set the VM to start # Set the VM to start
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'start' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'start'})
lock.release() lock.release()
return True, 'Starting VM "{}".'.format(domain) return True, 'Starting VM "{}".'.format(domain)
def restart_vm(zk_conn, domain, wait=False): def restart_vm(zk_conn, domain, wait=False):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -395,7 +398,7 @@ def restart_vm(zk_conn, domain, wait=False):
# Set the VM to restart # Set the VM to restart
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'restart' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'restart'})
lock.release() lock.release()
if wait: if wait:
@ -405,6 +408,7 @@ def restart_vm(zk_conn, domain, wait=False):
return True, retmsg return True, retmsg
def shutdown_vm(zk_conn, domain, wait=False): def shutdown_vm(zk_conn, domain, wait=False):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -421,7 +425,7 @@ def shutdown_vm(zk_conn, domain, wait=False):
# Set the VM to shutdown # Set the VM to shutdown
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'shutdown' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'shutdown'})
lock.release() lock.release()
if wait: if wait:
@ -431,23 +435,22 @@ def shutdown_vm(zk_conn, domain, wait=False):
return True, retmsg return True, retmsg
def stop_vm(zk_conn, domain): def stop_vm(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
if not dom_uuid: if not dom_uuid:
return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain) return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
# Get state and verify we're OK to proceed
current_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
# Set the VM to start # Set the VM to start
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'stop' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
lock.release() lock.release()
return True, 'Forcibly stopping VM "{}".'.format(domain) return True, 'Forcibly stopping VM "{}".'.format(domain)
def disable_vm(zk_conn, domain): def disable_vm(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -462,11 +465,12 @@ def disable_vm(zk_conn, domain):
# Set the VM to start # Set the VM to start
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'disable' }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'disable'})
lock.release() lock.release()
return True, 'Marked VM "{}" as disable.'.format(domain) return True, 'Marked VM "{}" as disable.'.format(domain)
def move_vm(zk_conn, domain, target_node, wait=False, force_live=False): def move_vm(zk_conn, domain, target_node, wait=False, force_live=False):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -516,10 +520,10 @@ def move_vm(zk_conn, domain, target_node, wait=False, force_live=False):
lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid)) lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
lock.acquire() lock.acquire()
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
'/domains/{}/state'.format(dom_uuid): target_state, '/domains/{}/state'.format(dom_uuid): target_state,
'/domains/{}/node'.format(dom_uuid): target_node, '/domains/{}/node'.format(dom_uuid): target_node,
'/domains/{}/lastnode'.format(dom_uuid): '' '/domains/{}/lastnode'.format(dom_uuid): ''
}) })
lock.release() lock.release()
if wait: if wait:
@ -529,6 +533,7 @@ def move_vm(zk_conn, domain, target_node, wait=False, force_live=False):
return True, retmsg return True, retmsg
def migrate_vm(zk_conn, domain, target_node, force_migrate, wait=False, force_live=False): def migrate_vm(zk_conn, domain, target_node, force_migrate, wait=False, force_live=False):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -594,6 +599,7 @@ def migrate_vm(zk_conn, domain, target_node, force_migrate, wait=False, force_li
return True, retmsg return True, retmsg
def unmigrate_vm(zk_conn, domain, wait=False, force_live=False): def unmigrate_vm(zk_conn, domain, wait=False, force_live=False):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -634,6 +640,7 @@ def unmigrate_vm(zk_conn, domain, wait=False, force_live=False):
return True, retmsg return True, retmsg
def get_console_log(zk_conn, domain, lines=1000): def get_console_log(zk_conn, domain, lines=1000):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -649,6 +656,7 @@ def get_console_log(zk_conn, domain, lines=1000):
return True, loglines return True, loglines
def get_info(zk_conn, domain): def get_info(zk_conn, domain):
# Validate that VM exists in cluster # Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain) dom_uuid = getDomainUUID(zk_conn, domain)
@ -662,6 +670,7 @@ def get_info(zk_conn, domain):
return True, domain_information return True, domain_information
def get_list(zk_conn, node, state, limit, is_fuzzy=True): def get_list(zk_conn, node, state, limit, is_fuzzy=True):
if node: if node:
# Verify node is valid # Verify node is valid
@ -669,8 +678,8 @@ def get_list(zk_conn, node, state, limit, is_fuzzy=True):
return False, 'Specified node "{}" is invalid.'.format(node) return False, 'Specified node "{}" is invalid.'.format(node)
if state: if state:
valid_states = [ 'start', 'restart', 'shutdown', 'stop', 'disable', 'fail', 'migrate', 'unmigrate', 'provision' ] valid_states = ['start', 'restart', 'shutdown', 'stop', 'disable', 'fail', 'migrate', 'unmigrate', 'provision']
if not state in valid_states: if state not in valid_states:
return False, 'VM state "{}" is not valid.'.format(state) return False, 'VM state "{}" is not valid.'.format(state)
full_vm_list = zkhandler.listchildren(zk_conn, '/domains') full_vm_list = zkhandler.listchildren(zk_conn, '/domains')
@ -680,9 +689,9 @@ def get_list(zk_conn, node, state, limit, is_fuzzy=True):
if limit and is_fuzzy: if limit and is_fuzzy:
try: try:
# Implcitly assume fuzzy limits # Implcitly assume fuzzy limits
if not re.match('\^.*', limit): if not re.match('[^].*', limit):
limit = '.*' + limit limit = '.*' + limit
if not re.match('.*\$', limit): if not re.match('.*[$]', limit):
limit = limit + '.*' limit = limit + '.*'
except Exception as e: except Exception as e:
return False, 'Regex Error: {}'.format(e) return False, 'Regex Error: {}'.format(e)
@ -722,269 +731,3 @@ def get_list(zk_conn, node, state, limit, is_fuzzy=True):
vm_list.append(common.getInformationFromXML(zk_conn, vm)) vm_list.append(common.getInformationFromXML(zk_conn, vm))
return True, vm_list return True, vm_list
#
# CLI-specific functions
#
def format_info(zk_conn, domain_information, long_output):
    """
    Print a formatted information display for a single VM to the terminal.

    zk_conn: an active Zookeeper connection, used to validate network entries
    domain_information: dict of VM details (as produced by get_info/getInformationFromXML)
    long_output: if truthy, additionally show virtualization details plus the
                 disk, interface, and controller tables

    Returns None; all output is written directly via click.echo().
    """
    # Format a nice output; do this line-by-line then concat the elements at the end
    ainformation = []
    ainformation.append('{}Virtual machine information:{}'.format(ansiprint.bold(), ansiprint.end()))
    ainformation.append('')
    # Basic information
    ainformation.append('{}UUID:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['uuid']))
    ainformation.append('{}Name:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['name']))
    ainformation.append('{}Description:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['description']))
    ainformation.append('{}Profile:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['profile']))
    ainformation.append('{}Memory (M):{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['memory']))
    ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu']))
    ainformation.append('{}Topology (S/C/T):{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['vcpu_topology']))

    # Was "long_output == True"; bare truthiness is the idiomatic form (flake8 E712)
    if long_output:
        # Virtualization information
        ainformation.append('')
        ainformation.append('{}Emulator:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['emulator']))
        ainformation.append('{}Type:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['type']))
        ainformation.append('{}Arch:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['arch']))
        ainformation.append('{}Machine:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['machine']))
        ainformation.append('{}Features:{} {}'.format(ansiprint.purple(), ansiprint.end(), ' '.join(domain_information['features'])))

    # PVC cluster information
    ainformation.append('')
    # Map each VM state to a display colour
    dstate_colour = {
        'start': ansiprint.green(),
        'restart': ansiprint.yellow(),
        'shutdown': ansiprint.yellow(),
        'stop': ansiprint.red(),
        'disable': ansiprint.blue(),
        'fail': ansiprint.red(),
        'migrate': ansiprint.blue(),
        'unmigrate': ansiprint.blue()
    }
    ainformation.append('{}State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), dstate_colour[domain_information['state']], domain_information['state'], ansiprint.end()))
    ainformation.append('{}Current Node:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['node']))
    if not domain_information['last_node']:
        domain_information['last_node'] = "N/A"
    ainformation.append('{}Previous Node:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['last_node']))

    # Get a failure reason if applicable
    if domain_information['failed_reason']:
        ainformation.append('')
        ainformation.append('{}Failure reason:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['failed_reason']))

    # Unset/falsy metadata fields are displayed as the literal string "False"
    if not domain_information['node_selector']:
        formatted_node_selector = "False"
    else:
        formatted_node_selector = domain_information['node_selector']

    if not domain_information['node_limit']:
        formatted_node_limit = "False"
    else:
        formatted_node_limit = ', '.join(domain_information['node_limit'])

    if not domain_information['node_autostart']:
        formatted_node_autostart = "False"
    else:
        formatted_node_autostart = domain_information['node_autostart']

    if not domain_information['migration_method']:
        formatted_migration_method = "False"
    else:
        formatted_migration_method = domain_information['migration_method']

    ainformation.append('{}Migration selector:{} {}'.format(ansiprint.purple(), ansiprint.end(), formatted_node_selector))
    ainformation.append('{}Node limit:{} {}'.format(ansiprint.purple(), ansiprint.end(), formatted_node_limit))
    ainformation.append('{}Autostart:{} {}'.format(ansiprint.purple(), ansiprint.end(), formatted_node_autostart))
    ainformation.append('{}Migration Method:{} {}'.format(ansiprint.purple(), ansiprint.end(), formatted_migration_method))

    # Network list; networks not present in Zookeeper are marked invalid in red
    net_list = []
    for net in domain_information['networks']:
        # Split out just the numerical (VNI) part of the brXXXX name
        net_vnis = re.findall(r'\d+', net['source'])
        if net_vnis:
            net_vni = net_vnis[0]
        else:
            net_vni = re.sub('br', '', net['source'])
        net_exists = zkhandler.exists(zk_conn, '/networks/{}'.format(net_vni))
        if not net_exists and net_vni != 'cluster':
            net_list.append(ansiprint.red() + net_vni + ansiprint.end() + ' [invalid]')
        else:
            net_list.append(net_vni)
    ainformation.append('')
    ainformation.append('{}Networks:{} {}'.format(ansiprint.purple(), ansiprint.end(), ', '.join(net_list)))

    # Was "long_output == True" (flake8 E712)
    if long_output:
        # Disk list; pad the Name column to the longest disk name
        ainformation.append('')
        name_length = 0
        for disk in domain_information['disks']:
            _name_length = len(disk['name']) + 1
            if _name_length > name_length:
                name_length = _name_length
        ainformation.append('{0}Disks:{1} {2}ID Type {3: <{width}} Dev Bus{4}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), 'Name', ansiprint.end(), width=name_length))
        for disk in domain_information['disks']:
            ainformation.append(' {0: <3} {1: <5} {2: <{width}} {3: <4} {4: <5}'.format(domain_information['disks'].index(disk), disk['type'], disk['name'], disk['dev'], disk['bus'], width=name_length))
        ainformation.append('')
        ainformation.append('{}Interfaces:{} {}ID Type Source Model MAC{}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
        for net in domain_information['networks']:
            ainformation.append(' {0: <3} {1: <8} {2: <10} {3: <8} {4}'.format(domain_information['networks'].index(net), net['type'], net['source'], net['model'], net['mac']))
        # Controller list
        ainformation.append('')
        ainformation.append('{}Controllers:{} {}ID Type Model{}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
        for controller in domain_information['controllers']:
            ainformation.append(' {0: <3} {1: <14} {2: <8}'.format(domain_information['controllers'].index(controller), controller['type'], controller['model']))

    # Join it all together
    information = '\n'.join(ainformation)
    click.echo(information)
    click.echo('')
def format_list(zk_conn, vm_list, raw):
    """
    Print a formatted table of VMs to the terminal.

    zk_conn: an active Zookeeper connection, used to validate network entries
    vm_list: list of VM information dicts (as produced by get_list)
    raw: if truthy, print only the sorted VM names, one per line

    Returns a (success, message) tuple; output goes directly to click.echo().
    """
    def getNiceNetID(domain_information):
        # Reduce each network 'source' (e.g. br1000) to its bare VNI string
        vni_list = []
        for net in domain_information['networks']:
            # Prefer the numeric portion of the brXXXX name; otherwise just
            # strip the 'br' prefix
            found_vnis = re.findall(r'\d+', net['source'])
            vni_list.append(found_vnis[0] if found_vnis else re.sub('br', '', net['source']))
        return vni_list

    # Handle raw mode since it just lists the names
    if raw:
        for vm_name in sorted(item['name'] for item in vm_list):
            click.echo(vm_name)
        return True, ''

    vm_list_output = []

    # Determine optimal column widths: fixed columns keep their defaults,
    # dynamic columns grow to fit the longest value plus one space
    vm_uuid_length = 37
    vm_ram_length = 8
    vm_vcpu_length = 6
    vm_name_length = max([5] + [len(vm['name']) + 1 for vm in vm_list])
    vm_state_length = max([6] + [len(vm['state']) + 1 for vm in vm_list])
    vm_nets_length = max([9] + [len(','.join(getNiceNetID(vm))) + 1 for vm in vm_list])
    vm_node_length = max([8] + [len(vm['node']) + 1 for vm in vm_list])
    vm_migrated_length = max([10] + [len(vm['migrated']) + 1 for vm in vm_list])

    # Format the string (header)
    vm_list_output.append(
        '{bold}{vm_name: <{vm_name_length}} {vm_uuid: <{vm_uuid_length}} \
{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \
{vm_networks: <{vm_nets_length}} \
{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \
{vm_node: <{vm_node_length}} \
{vm_migrated: <{vm_migrated_length}}{end_bold}'.format(
            vm_name_length=vm_name_length,
            vm_uuid_length=vm_uuid_length,
            vm_state_length=vm_state_length,
            vm_nets_length=vm_nets_length,
            vm_ram_length=vm_ram_length,
            vm_vcpu_length=vm_vcpu_length,
            vm_node_length=vm_node_length,
            vm_migrated_length=vm_migrated_length,
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            vm_state_colour='',
            end_colour='',
            vm_name='Name',
            vm_uuid='UUID',
            vm_state='State',
            vm_networks='Networks',
            vm_memory='RAM (M)',
            vm_vcpu='vCPUs',
            vm_node='Node',
            vm_migrated='Migrated'
        )
    )

    # Map each known state to its display colour; anything else shows blue
    state_colour_map = {
        'start': ansiprint.green(),
        'restart': ansiprint.yellow(),
        'shutdown': ansiprint.yellow(),
        'stop': ansiprint.red(),
        'fail': ansiprint.red(),
    }

    # Format the string (elements)
    for domain_information in vm_list:
        vm_state_colour = state_colour_map.get(domain_information['state'], ansiprint.blue())

        # Handle colouring for an invalid network config
        net_list = getNiceNetID(domain_information)
        vm_net_colour = ''
        for net_vni in net_list:
            if not zkhandler.exists(zk_conn, '/networks/{}'.format(net_vni)) and net_vni != 'cluster':
                vm_net_colour = ansiprint.red()

        vm_list_output.append(
            '{bold}{vm_name: <{vm_name_length}} {vm_uuid: <{vm_uuid_length}} \
{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \
{vm_net_colour}{vm_networks: <{vm_nets_length}}{end_colour} \
{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \
{vm_node: <{vm_node_length}} \
{vm_migrated: <{vm_migrated_length}}{end_bold}'.format(
                vm_name_length=vm_name_length,
                vm_uuid_length=vm_uuid_length,
                vm_state_length=vm_state_length,
                vm_nets_length=vm_nets_length,
                vm_ram_length=vm_ram_length,
                vm_vcpu_length=vm_vcpu_length,
                vm_node_length=vm_node_length,
                vm_migrated_length=vm_migrated_length,
                bold='',
                end_bold='',
                vm_state_colour=vm_state_colour,
                end_colour=ansiprint.end(),
                vm_name=domain_information['name'],
                vm_uuid=domain_information['uuid'],
                vm_state=domain_information['state'],
                vm_net_colour=vm_net_colour,
                vm_networks=','.join(net_list),
                vm_memory=domain_information['memory'],
                vm_vcpu=domain_information['vcpu'],
                vm_node=domain_information['node'],
                vm_migrated=domain_information['migrated']
            )
        )

    click.echo('\n'.join(sorted(vm_list_output)))

    return True, ''

View File

@ -20,10 +20,9 @@
# #
############################################################################### ###############################################################################
import kazoo.client import time
import uuid import uuid
import daemon_lib.ansiprint as ansiprint
# Exists function # Exists function
def exists(zk_conn, key): def exists(zk_conn, key):
@ -33,15 +32,18 @@ def exists(zk_conn, key):
else: else:
return False return False
# Child list function # Child list function
def listchildren(zk_conn, key): def listchildren(zk_conn, key):
children = zk_conn.get_children(key) children = zk_conn.get_children(key)
return children return children
# Delete key function # Delete key function
def deletekey(zk_conn, key, recursive=True): def deletekey(zk_conn, key, recursive=True):
zk_conn.delete(key, recursive=recursive) zk_conn.delete(key, recursive=recursive)
# Rename key recursive function # Rename key recursive function
def rename_key_element(zk_conn, zk_transaction, source_key, destination_key): def rename_key_element(zk_conn, zk_transaction, source_key, destination_key):
data_raw = zk_conn.get(source_key) data_raw = zk_conn.get(source_key)
@ -56,6 +58,7 @@ def rename_key_element(zk_conn, zk_transaction, source_key, destination_key):
zk_transaction.delete(source_key) zk_transaction.delete(source_key)
# Rename key function # Rename key function
def renamekey(zk_conn, kv): def renamekey(zk_conn, kv):
# Start up a transaction # Start up a transaction
@ -81,13 +84,14 @@ def renamekey(zk_conn, kv):
except Exception: except Exception:
return False return False
# Data read function # Data read function
def readdata(zk_conn, key): def readdata(zk_conn, key):
data_raw = zk_conn.get(key) data_raw = zk_conn.get(key)
data = data_raw[0].decode('utf8') data = data_raw[0].decode('utf8')
meta = data_raw[1]
return data return data
# Data write function # Data write function
def writedata(zk_conn, kv): def writedata(zk_conn, kv):
# Start up a transaction # Start up a transaction
@ -126,8 +130,10 @@ def writedata(zk_conn, kv):
except Exception: except Exception:
return False return False
# Write lock function # Write lock function
def writelock(zk_conn, key): def writelock(zk_conn, key):
count = 1
while True: while True:
try: try:
lock_id = str(uuid.uuid1()) lock_id = str(uuid.uuid1())
@ -142,8 +148,10 @@ def writelock(zk_conn, key):
continue continue
return lock return lock
# Read lock function # Read lock function
def readlock(zk_conn, key): def readlock(zk_conn, key):
count = 1
while True: while True:
try: try:
lock_id = str(uuid.uuid1()) lock_id = str(uuid.uuid1())
@ -158,6 +166,7 @@ def readlock(zk_conn, key):
continue continue
return lock return lock
# Exclusive lock function # Exclusive lock function
def exclusivelock(zk_conn, key): def exclusivelock(zk_conn, key):
count = 1 count = 1

15
lint Executable file
View File

@ -0,0 +1,15 @@
#!/usr/bin/env bash

# lint - run flake8 over the project tree.
# Exits 0 when no issues are found, non-zero otherwise (suitable for use
# from the pre-commit hook).

# Refuse to run without the linter installed
if ! which flake8 &>/dev/null; then
    echo "Flake8 is required to lint this project"
    exit 1
fi

# E501 (line too long) is ignored project-wide; generated migration and
# example files are excluded from linting
flake8 \
    --ignore=E501 \
    --exclude=api-daemon/migrations/versions,api-daemon/provisioner/examples
ret=$?
if [[ $ret -eq 0 ]]; then
    echo "No linting issues found!"
fi
# Propagate flake8's exit status to the caller
exit $ret

View File

@ -20,4 +20,4 @@
# #
############################################################################### ###############################################################################
import pvcnoded.Daemon import pvcnoded.Daemon # noqa: F401

View File

@ -24,10 +24,10 @@ import time
import json import json
import psutil import psutil
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler import pvcnoded.zkhandler as zkhandler
import pvcnoded.common as common import pvcnoded.common as common
class CephOSDInstance(object): class CephOSDInstance(object):
def __init__(self, zk_conn, this_node, osd_id): def __init__(self, zk_conn, this_node, osd_id):
self.zk_conn = zk_conn self.zk_conn = zk_conn
@ -67,6 +67,7 @@ class CephOSDInstance(object):
if data and data != self.stats: if data and data != self.stats:
self.stats = json.loads(data) self.stats = json.loads(data)
def add_osd(zk_conn, logger, node, device, weight): def add_osd(zk_conn, logger, node, device, weight):
# We are ready to create a new OSD on this node # We are ready to create a new OSD on this node
logger.out('Creating new OSD disk on block device {}'.format(device), state='i') logger.out('Creating new OSD disk on block device {}'.format(device), state='i')
@ -189,13 +190,14 @@ def add_osd(zk_conn, logger, node, device, weight):
logger.out('Failed to create new OSD disk: {}'.format(e), state='e') logger.out('Failed to create new OSD disk: {}'.format(e), state='e')
return False return False
def remove_osd(zk_conn, logger, osd_id, osd_obj): def remove_osd(zk_conn, logger, osd_id, osd_obj):
logger.out('Removing OSD disk {}'.format(osd_id), state='i') logger.out('Removing OSD disk {}'.format(osd_id), state='i')
try: try:
# 1. Verify the OSD is present # 1. Verify the OSD is present
retcode, stdout, stderr = common.run_os_command('ceph osd ls') retcode, stdout, stderr = common.run_os_command('ceph osd ls')
osd_list = stdout.split('\n') osd_list = stdout.split('\n')
if not osd_id in osd_list: if osd_id not in osd_list:
logger.out('Could not find OSD {} in the cluster'.format(osd_id), state='e') logger.out('Could not find OSD {} in the cluster'.format(osd_id), state='e')
return True return True
@ -220,10 +222,10 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
osd_string = osd osd_string = osd
num_pgs = osd_string['num_pgs'] num_pgs = osd_string['num_pgs']
if num_pgs > 0: if num_pgs > 0:
time.sleep(5) time.sleep(5)
else: else:
raise raise
except: except Exception:
break break
# 3. Stop the OSD process and wait for it to be terminated # 3. Stop the OSD process and wait for it to be terminated
@ -248,7 +250,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
# 4. Determine the block devices # 4. Determine the block devices
retcode, stdout, stderr = common.run_os_command('readlink /var/lib/ceph/osd/ceph-{}/block'.format(osd_id)) retcode, stdout, stderr = common.run_os_command('readlink /var/lib/ceph/osd/ceph-{}/block'.format(osd_id))
vg_name = stdout.split('/')[-2] # e.g. /dev/ceph-<uuid>/osd-block-<uuid> vg_name = stdout.split('/')[-2] # e.g. /dev/ceph-<uuid>/osd-block-<uuid>
retcode, stdout, stderr = common.run_os_command('vgs --separator , --noheadings -o pv_name {}'.format(vg_name)) retcode, stdout, stderr = common.run_os_command('vgs --separator , --noheadings -o pv_name {}'.format(vg_name))
pv_block = stdout.strip() pv_block = stdout.strip()
@ -282,6 +284,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
logger.out('Failed to purge OSD disk with ID {}: {}'.format(osd_id, e), state='e') logger.out('Failed to purge OSD disk with ID {}: {}'.format(osd_id, e), state='e')
return False return False
class CephPoolInstance(object): class CephPoolInstance(object):
def __init__(self, zk_conn, this_node, name): def __init__(self, zk_conn, this_node, name):
self.zk_conn = zk_conn self.zk_conn = zk_conn
@ -320,6 +323,7 @@ class CephPoolInstance(object):
if data and data != self.stats: if data and data != self.stats:
self.stats = json.loads(data) self.stats = json.loads(data)
class CephVolumeInstance(object): class CephVolumeInstance(object):
def __init__(self, zk_conn, this_node, pool, name): def __init__(self, zk_conn, this_node, pool, name):
self.zk_conn = zk_conn self.zk_conn = zk_conn
@ -343,8 +347,9 @@ class CephVolumeInstance(object):
if data and data != self.stats: if data and data != self.stats:
self.stats = json.loads(data) self.stats = json.loads(data)
class CephSnapshotInstance(object): class CephSnapshotInstance(object):
def __init__(self, zk_conn, this_node, name): def __init__(self, zk_conn, this_node, pool, volume, name):
self.zk_conn = zk_conn self.zk_conn = zk_conn
self.this_node = this_node self.this_node = this_node
self.pool = pool self.pool = pool
@ -367,6 +372,7 @@ class CephSnapshotInstance(object):
if data and data != self.stats: if data and data != self.stats:
self.stats = json.loads(data) self.stats = json.loads(data)
# Primary command function # Primary command function
# This command pipe is only used for OSD adds and removes # This command pipe is only used for OSD adds and removes
def run_command(zk_conn, logger, this_node, data, d_osd): def run_command(zk_conn, logger, this_node, data, d_osd):

View File

@ -27,10 +27,9 @@ import psycopg2
from threading import Thread, Event from threading import Thread, Event
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler
import pvcnoded.common as common import pvcnoded.common as common
class DNSAggregatorInstance(object): class DNSAggregatorInstance(object):
# Initialization function # Initialization function
def __init__(self, zk_conn, config, logger): def __init__(self, zk_conn, config, logger):
@ -67,6 +66,7 @@ class DNSAggregatorInstance(object):
del self.dns_networks[network] del self.dns_networks[network]
self.dns_axfr_daemon.update_networks(self.dns_networks) self.dns_axfr_daemon.update_networks(self.dns_networks)
class PowerDNSInstance(object): class PowerDNSInstance(object):
# Initialization function # Initialization function
def __init__(self, aggregator): def __init__(self, aggregator):
@ -86,37 +86,30 @@ class PowerDNSInstance(object):
) )
# Define the PowerDNS config # Define the PowerDNS config
dns_configuration = [ dns_configuration = [
# Option # Explanation # Option # Explanation
'--no-config', '--no-config',
'--daemon=no', # Start directly '--daemon=no', # Start directly
'--guardian=yes', # Use a guardian '--guardian=yes', # Use a guardian
'--disable-syslog=yes', # Log only to stdout (which is then captured) '--disable-syslog=yes', # Log only to stdout (which is then captured)
'--disable-axfr=no', # Allow AXFRs '--disable-axfr=no', # Allow AXFRs
'--allow-axfr-ips=0.0.0.0/0', # Allow AXFRs to anywhere '--allow-axfr-ips=0.0.0.0/0', # Allow AXFRs to anywhere
'--local-address={},{}'.format(self.vni_ipaddr, self.upstream_ipaddr), '--local-address={},{}'.format(self.vni_ipaddr, self.upstream_ipaddr), # Listen on floating IPs
# Listen on floating IPs '--local-port=53', # On port 53
'--local-port=53', # On port 53 '--log-dns-details=on', # Log details
'--log-dns-details=on', # Log details '--loglevel=3', # Log info
'--loglevel=3', # Log info '--master=yes', # Enable master mode
'--master=yes', # Enable master mode '--slave=yes', # Enable slave mode
'--slave=yes', # Enable slave mode '--slave-renotify=yes', # Renotify out for our slaved zones
'--slave-renotify=yes', # Renotify out for our slaved zones '--version-string=powerdns', # Set the version string
'--version-string=powerdns', # Set the version string '--default-soa-name=dns.pvc.local', # Override dnsmasq's invalid name
'--default-soa-name=dns.pvc.local', # Override dnsmasq's invalid name '--socket-dir={}'.format(self.config['pdns_dynamic_directory']), # Standard socket directory
'--socket-dir={}'.format(self.config['pdns_dynamic_directory']), '--launch=gpgsql', # Use the PostgreSQL backend
# Standard socket directory '--gpgsql-host={}'.format(self.config['pdns_postgresql_host']), # PostgreSQL instance
'--launch=gpgsql', # Use the PostgreSQL backend '--gpgsql-port={}'.format(self.config['pdns_postgresql_port']), # Default port
'--gpgsql-host={}'.format(self.config['pdns_postgresql_host']), '--gpgsql-dbname={}'.format(self.config['pdns_postgresql_dbname']), # Database name
# PostgreSQL instance '--gpgsql-user={}'.format(self.config['pdns_postgresql_user']), # User name
'--gpgsql-port={}'.format(self.config['pdns_postgresql_port']), '--gpgsql-password={}'.format(self.config['pdns_postgresql_password']), # User password
# Default port '--gpgsql-dnssec=no', # Do DNSSEC elsewhere
'--gpgsql-dbname={}'.format(self.config['pdns_postgresql_dbname']),
# Database name
'--gpgsql-user={}'.format(self.config['pdns_postgresql_user']),
# User name
'--gpgsql-password={}'.format(self.config['pdns_postgresql_password']),
# User password
'--gpgsql-dnssec=no', # Do DNSSEC elsewhere
] ]
# Start the pdns process in a thread # Start the pdns process in a thread
self.dns_server_daemon = common.run_os_daemon( self.dns_server_daemon = common.run_os_daemon(
@ -132,7 +125,6 @@ class PowerDNSInstance(object):
state='o' state='o'
) )
def stop(self): def stop(self):
if self.dns_server_daemon: if self.dns_server_daemon:
self.logger.out( self.logger.out(
@ -148,6 +140,7 @@ class PowerDNSInstance(object):
state='o' state='o'
) )
class DNSNetworkInstance(object): class DNSNetworkInstance(object):
# Initialization function # Initialization function
def __init__(self, aggregator, network): def __init__(self, aggregator, network):
@ -160,10 +153,6 @@ class DNSNetworkInstance(object):
# Add a new network to the aggregator database # Add a new network to the aggregator database
def add_network(self): def add_network(self):
network_domain = self.network.domain network_domain = self.network.domain
if self.network.ip4_gateway != 'None':
network_gateway = self.network.ip4_gateway
else:
network_gateway = self.network.ip6_gateway
self.logger.out( self.logger.out(
'Adding entry for client domain {}'.format( 'Adding entry for client domain {}'.format(
@ -176,11 +165,11 @@ class DNSNetworkInstance(object):
# Connect to the database # Connect to the database
self.sql_conn = psycopg2.connect( self.sql_conn = psycopg2.connect(
"host='{}' port='{}' dbname='{}' user='{}' password='{}' sslmode='disable'".format( "host='{}' port='{}' dbname='{}' user='{}' password='{}' sslmode='disable'".format(
self.config['pdns_postgresql_host'], self.config['pdns_postgresql_host'],
self.config['pdns_postgresql_port'], self.config['pdns_postgresql_port'],
self.config['pdns_postgresql_dbname'], self.config['pdns_postgresql_dbname'],
self.config['pdns_postgresql_user'], self.config['pdns_postgresql_user'],
self.config['pdns_postgresql_password'] self.config['pdns_postgresql_password']
) )
) )
sql_curs = self.sql_conn.cursor() sql_curs = self.sql_conn.cursor()
@ -252,11 +241,11 @@ class DNSNetworkInstance(object):
# Connect to the database # Connect to the database
self.sql_conn = psycopg2.connect( self.sql_conn = psycopg2.connect(
"host='{}' port='{}' dbname='{}' user='{}' password='{}' sslmode='disable'".format( "host='{}' port='{}' dbname='{}' user='{}' password='{}' sslmode='disable'".format(
self.config['pdns_postgresql_host'], self.config['pdns_postgresql_host'],
self.config['pdns_postgresql_port'], self.config['pdns_postgresql_port'],
self.config['pdns_postgresql_dbname'], self.config['pdns_postgresql_dbname'],
self.config['pdns_postgresql_user'], self.config['pdns_postgresql_user'],
self.config['pdns_postgresql_password'] self.config['pdns_postgresql_password']
) )
) )
sql_curs = self.sql_conn.cursor() sql_curs = self.sql_conn.cursor()
@ -308,16 +297,16 @@ class AXFRDaemonInstance(object):
# after the leader transitions # after the leader transitions
self.sql_conn = psycopg2.connect( self.sql_conn = psycopg2.connect(
"host='{}' port='{}' dbname='{}' user='{}' password='{}' sslmode='disable'".format( "host='{}' port='{}' dbname='{}' user='{}' password='{}' sslmode='disable'".format(
self.config['pdns_postgresql_host'], self.config['pdns_postgresql_host'],
self.config['pdns_postgresql_port'], self.config['pdns_postgresql_port'],
self.config['pdns_postgresql_dbname'], self.config['pdns_postgresql_dbname'],
self.config['pdns_postgresql_user'], self.config['pdns_postgresql_user'],
self.config['pdns_postgresql_password'] self.config['pdns_postgresql_password']
) )
) )
# Start the thread # Start the thread
self.thread.start() self.thread.start()
def stop(self): def stop(self):
self.thread_stopper.set() self.thread_stopper.set()
@ -332,12 +321,10 @@ class AXFRDaemonInstance(object):
while not self.thread_stopper.is_set(): while not self.thread_stopper.is_set():
# We do this for each network # We do this for each network
for network, instance in self.dns_networks.items(): for network, instance in self.dns_networks.items():
zone_modified = False
# Set up our SQL cursor # Set up our SQL cursor
try: try:
sql_curs = self.sql_conn.cursor() sql_curs = self.sql_conn.cursor()
except: except Exception:
time.sleep(0.5) time.sleep(0.5)
continue continue
@ -446,7 +433,7 @@ class AXFRDaemonInstance(object):
self.logger.out('Old but not new: {}'.format(in_old_not_in_new), state='d', prefix='dns-aggregator') self.logger.out('Old but not new: {}'.format(in_old_not_in_new), state='d', prefix='dns-aggregator')
# Go through the old list # Go through the old list
remove_records = list() # list of database IDs remove_records = list() # list of database IDs
for i in range(len(records_old)): for i in range(len(records_old)):
record_id = records_old_ids[i] record_id = records_old_ids[i]
record = records_old[i] record = records_old[i]

View File

@ -20,9 +20,6 @@
# #
############################################################################### ###############################################################################
# Version string for startup output
version = '0.9.1'
import kazoo.client import kazoo.client
import libvirt import libvirt
import sys import sys
@ -56,6 +53,9 @@ import pvcnoded.DNSAggregatorInstance as DNSAggregatorInstance
import pvcnoded.CephInstance as CephInstance import pvcnoded.CephInstance as CephInstance
import pvcnoded.MetadataAPIInstance as MetadataAPIInstance import pvcnoded.MetadataAPIInstance as MetadataAPIInstance
# Version string for startup output
version = '0.9.1'
############################################################################### ###############################################################################
# PVCD - node daemon startup program # PVCD - node daemon startup program
############################################################################### ###############################################################################
@ -74,6 +74,7 @@ import pvcnoded.MetadataAPIInstance as MetadataAPIInstance
# Daemon functions # Daemon functions
############################################################################### ###############################################################################
# Create timer to update this node in Zookeeper # Create timer to update this node in Zookeeper
def startKeepaliveTimer(): def startKeepaliveTimer():
# Create our timer object # Create our timer object
@ -85,14 +86,16 @@ def startKeepaliveTimer():
node_keepalive() node_keepalive()
return update_timer return update_timer
def stopKeepaliveTimer(): def stopKeepaliveTimer():
global update_timer global update_timer
try: try:
update_timer.shutdown() update_timer.shutdown()
logger.out('Stopping keepalive timer', state='s') logger.out('Stopping keepalive timer', state='s')
except: except Exception:
pass pass
############################################################################### ###############################################################################
# PHASE 1a - Configuration parsing # PHASE 1a - Configuration parsing
############################################################################### ###############################################################################
@ -100,13 +103,12 @@ def stopKeepaliveTimer():
# Get the config file variable from the environment # Get the config file variable from the environment
try: try:
pvcnoded_config_file = os.environ['PVCD_CONFIG_FILE'] pvcnoded_config_file = os.environ['PVCD_CONFIG_FILE']
except: except Exception:
print('ERROR: The "PVCD_CONFIG_FILE" environment variable must be set before starting pvcnoded.') print('ERROR: The "PVCD_CONFIG_FILE" environment variable must be set before starting pvcnoded.')
exit(1) exit(1)
# Set local hostname and domain variables # Set local hostname and domain variables
myfqdn = gethostname() myfqdn = gethostname()
#myfqdn = 'pvc-hv1.domain.net'
myhostname = myfqdn.split('.', 1)[0] myhostname = myfqdn.split('.', 1)[0]
mydomainname = ''.join(myfqdn.split('.', 1)[1:]) mydomainname = ''.join(myfqdn.split('.', 1)[1:])
try: try:
@ -125,6 +127,7 @@ staticdata.append(subprocess.run(['uname', '-r'], stdout=subprocess.PIPE).stdout
staticdata.append(subprocess.run(['uname', '-o'], stdout=subprocess.PIPE).stdout.decode('ascii').strip()) staticdata.append(subprocess.run(['uname', '-o'], stdout=subprocess.PIPE).stdout.decode('ascii').strip())
staticdata.append(subprocess.run(['uname', '-m'], stdout=subprocess.PIPE).stdout.decode('ascii').strip()) staticdata.append(subprocess.run(['uname', '-m'], stdout=subprocess.PIPE).stdout.decode('ascii').strip())
# Read and parse the config file # Read and parse the config file
def readConfig(pvcnoded_config_file, myhostname): def readConfig(pvcnoded_config_file, myhostname):
print('Loading configuration from file "{}"'.format(pvcnoded_config_file)) print('Loading configuration from file "{}"'.format(pvcnoded_config_file))
@ -176,7 +179,7 @@ def readConfig(pvcnoded_config_file, myhostname):
config_debug = { config_debug = {
'debug': o_config['pvc']['debug'] 'debug': o_config['pvc']['debug']
} }
except: except Exception:
config_debug = { config_debug = {
'debug': False 'debug': False
} }
@ -223,9 +226,7 @@ def readConfig(pvcnoded_config_file, myhostname):
config = {**config, **config_networking} config = {**config, **config_networking}
# Create the by-id address entries # Create the by-id address entries
for net in [ 'vni', for net in ['vni', 'storage', 'upstream']:
'storage',
'upstream' ]:
address_key = '{}_dev_ip'.format(net) address_key = '{}_dev_ip'.format(net)
floating_key = '{}_floating_ip'.format(net) floating_key = '{}_floating_ip'.format(net)
network_key = '{}_network'.format(net) network_key = '{}_network'.format(net)
@ -233,7 +234,7 @@ def readConfig(pvcnoded_config_file, myhostname):
# Verify the network provided is valid # Verify the network provided is valid
try: try:
network = ip_network(config[network_key]) network = ip_network(config[network_key])
except Exception as e: except Exception:
print('ERROR: Network address {} for {} is not valid!'.format(config[network_key], network_key)) print('ERROR: Network address {} for {} is not valid!'.format(config[network_key], network_key))
exit(1) exit(1)
@ -251,9 +252,9 @@ def readConfig(pvcnoded_config_file, myhostname):
# Set the ipaddr # Set the ipaddr
floating_addr = ip_address(config[floating_key].split('/')[0]) floating_addr = ip_address(config[floating_key].split('/')[0])
# Verify we're in the network # Verify we're in the network
if not floating_addr in list(network.hosts()): if floating_addr not in list(network.hosts()):
raise raise
except Exception as e: except Exception:
print('ERROR: Floating address {} for {} is not valid!'.format(config[floating_key], floating_key)) print('ERROR: Floating address {} for {} is not valid!'.format(config[floating_key], floating_key))
exit(1) exit(1)
@ -271,10 +272,11 @@ def readConfig(pvcnoded_config_file, myhostname):
# Handle an empty ipmi_hostname # Handle an empty ipmi_hostname
if config['ipmi_hostname'] == '': if config['ipmi_hostname'] == '':
config['ipmi_hostname'] = myshorthostname + '-lom.' + mydomainname config['ipmi_hostname'] = myhostname + '-lom.' + mydomainname
return config return config
# Get the config object from readConfig() # Get the config object from readConfig()
config = readConfig(pvcnoded_config_file, myhostname) config = readConfig(pvcnoded_config_file, myhostname)
debug = config['debug'] debug = config['debug']
@ -513,6 +515,7 @@ except Exception as e:
logger.out('ERROR: Failed to connect to Zookeeper cluster: {}'.format(e), state='e') logger.out('ERROR: Failed to connect to Zookeeper cluster: {}'.format(e), state='e')
exit(1) exit(1)
# Handle zookeeper failures # Handle zookeeper failures
def zk_listener(state): def zk_listener(state):
global zk_conn, update_timer global zk_conn, update_timer
@ -535,7 +538,7 @@ def zk_listener(state):
_zk_conn = kazoo.client.KazooClient(hosts=config['coordinators']) _zk_conn = kazoo.client.KazooClient(hosts=config['coordinators'])
try: try:
_zk_conn.start() _zk_conn.start()
except: except Exception:
del _zk_conn del _zk_conn
continue continue
@ -545,12 +548,14 @@ def zk_listener(state):
zk_conn.add_listener(zk_listener) zk_conn.add_listener(zk_listener)
break break
zk_conn.add_listener(zk_listener) zk_conn.add_listener(zk_listener)
############################################################################### ###############################################################################
# PHASE 5 - Gracefully handle termination # PHASE 5 - Gracefully handle termination
############################################################################### ###############################################################################
# Cleanup function # Cleanup function
def cleanup(): def cleanup():
global zk_conn, update_timer, d_domain global zk_conn, update_timer, d_domain
@ -558,7 +563,7 @@ def cleanup():
logger.out('Terminating pvcnoded and cleaning up', state='s') logger.out('Terminating pvcnoded and cleaning up', state='s')
# Set shutdown state in Zookeeper # Set shutdown state in Zookeeper
zkhandler.writedata(zk_conn, { '/nodes/{}/daemonstate'.format(myhostname): 'shutdown' }) zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(myhostname): 'shutdown'})
# Waiting for any flushes to complete # Waiting for any flushes to complete
logger.out('Waiting for any active flushes', state='s') logger.out('Waiting for any active flushes', state='s')
@ -571,22 +576,21 @@ def cleanup():
if d_domain[domain].getnode() == myhostname: if d_domain[domain].getnode() == myhostname:
try: try:
d_domain[domain].console_log_instance.stop() d_domain[domain].console_log_instance.stop()
except NameError as e: except NameError:
pass pass
except AttributeError as e: except AttributeError:
pass pass
# Force into secondary coordinator state if needed # Force into secondary coordinator state if needed
try: try:
if this_node.router_state == 'primary': if this_node.router_state == 'primary':
is_primary = True
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
'/primary_node': 'none' '/primary_node': 'none'
}) })
logger.out('Waiting for primary migration', state='s') logger.out('Waiting for primary migration', state='s')
while this_node.router_state != 'secondary': while this_node.router_state != 'secondary':
time.sleep(0.5) time.sleep(0.5)
except: except Exception:
pass pass
# Stop keepalive thread # Stop keepalive thread
@ -601,7 +605,7 @@ def cleanup():
node_keepalive() node_keepalive()
# Set stop state in Zookeeper # Set stop state in Zookeeper
zkhandler.writedata(zk_conn, { '/nodes/{}/daemonstate'.format(myhostname): 'stop' }) zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(myhostname): 'stop'})
# Forcibly terminate dnsmasq because it gets stuck sometimes # Forcibly terminate dnsmasq because it gets stuck sometimes
common.run_os_command('killall dnsmasq') common.run_os_command('killall dnsmasq')
@ -610,21 +614,24 @@ def cleanup():
try: try:
zk_conn.stop() zk_conn.stop()
zk_conn.close() zk_conn.close()
except: except Exception:
pass pass
logger.out('Terminated pvc daemon', state='s') logger.out('Terminated pvc daemon', state='s')
sys.exit(0) sys.exit(0)
# Termination function # Termination function
def term(signum='', frame=''): def term(signum='', frame=''):
cleanup() cleanup()
# Hangup (logrotate) function # Hangup (logrotate) function
def hup(signum='', frame=''): def hup(signum='', frame=''):
if config['file_logging']: if config['file_logging']:
logger.hup() logger.hup()
# Handle signals gracefully # Handle signals gracefully
signal.signal(signal.SIGTERM, term) signal.signal(signal.SIGTERM, term)
signal.signal(signal.SIGINT, term) signal.signal(signal.SIGINT, term)
@ -648,7 +655,7 @@ if zk_conn.exists('/nodes/{}'.format(myhostname)):
'/nodes/{}/daemonstate'.format(myhostname): 'init', '/nodes/{}/daemonstate'.format(myhostname): 'init',
'/nodes/{}/routerstate'.format(myhostname): init_routerstate, '/nodes/{}/routerstate'.format(myhostname): init_routerstate,
'/nodes/{}/staticdata'.format(myhostname): ' '.join(staticdata), '/nodes/{}/staticdata'.format(myhostname): ' '.join(staticdata),
# Keepalives and fencing information (always load and set from config on boot) # Keepalives and fencing information (always load and set from config on boot)
'/nodes/{}/ipmihostname'.format(myhostname): config['ipmi_hostname'], '/nodes/{}/ipmihostname'.format(myhostname): config['ipmi_hostname'],
'/nodes/{}/ipmiusername'.format(myhostname): config['ipmi_username'], '/nodes/{}/ipmiusername'.format(myhostname): config['ipmi_username'],
'/nodes/{}/ipmipassword'.format(myhostname): config['ipmi_password'] '/nodes/{}/ipmipassword'.format(myhostname): config['ipmi_password']
@ -658,7 +665,7 @@ else:
keepalive_time = int(time.time()) keepalive_time = int(time.time())
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
'/nodes/{}'.format(myhostname): config['daemon_mode'], '/nodes/{}'.format(myhostname): config['daemon_mode'],
# Basic state information # Basic state information
'/nodes/{}/daemonmode'.format(myhostname): config['daemon_mode'], '/nodes/{}/daemonmode'.format(myhostname): config['daemon_mode'],
'/nodes/{}/daemonstate'.format(myhostname): 'init', '/nodes/{}/daemonstate'.format(myhostname): 'init',
'/nodes/{}/routerstate'.format(myhostname): 'client', '/nodes/{}/routerstate'.format(myhostname): 'client',
@ -674,7 +681,7 @@ else:
'/nodes/{}/networkscount'.format(myhostname): '0', '/nodes/{}/networkscount'.format(myhostname): '0',
'/nodes/{}/domainscount'.format(myhostname): '0', '/nodes/{}/domainscount'.format(myhostname): '0',
'/nodes/{}/runningdomains'.format(myhostname): '', '/nodes/{}/runningdomains'.format(myhostname): '',
# Keepalives and fencing information # Keepalives and fencing information
'/nodes/{}/keepalive'.format(myhostname): str(keepalive_time), '/nodes/{}/keepalive'.format(myhostname): str(keepalive_time),
'/nodes/{}/ipmihostname'.format(myhostname): config['ipmi_hostname'], '/nodes/{}/ipmihostname'.format(myhostname): config['ipmi_hostname'],
'/nodes/{}/ipmiusername'.format(myhostname): config['ipmi_username'], '/nodes/{}/ipmiusername'.format(myhostname): config['ipmi_username'],
@ -692,7 +699,7 @@ if current_primary and current_primary != 'none':
else: else:
if config['daemon_mode'] == 'coordinator': if config['daemon_mode'] == 'coordinator':
logger.out('No primary node found; creating with us as primary.', state='i') logger.out('No primary node found; creating with us as primary.', state='i')
zkhandler.writedata(zk_conn, { '/primary_node': myhostname }) zkhandler.writedata(zk_conn, {'/primary_node': myhostname})
############################################################################### ###############################################################################
# PHASE 7a - Ensure IPMI is reachable and working # PHASE 7a - Ensure IPMI is reachable and working
@ -726,17 +733,17 @@ if enable_networking:
common.run_os_command( common.run_os_command(
'/bin/mkdir --parents {}/networks'.format( '/bin/mkdir --parents {}/networks'.format(
config['nft_dynamic_directory'] config['nft_dynamic_directory']
) )
) )
common.run_os_command( common.run_os_command(
'/bin/mkdir --parents {}/static'.format( '/bin/mkdir --parents {}/static'.format(
config['nft_dynamic_directory'] config['nft_dynamic_directory']
) )
) )
common.run_os_command( common.run_os_command(
'/bin/mkdir --parents {}'.format( '/bin/mkdir --parents {}'.format(
config['nft_dynamic_directory'] config['nft_dynamic_directory']
) )
) )
# Set up the basic features of the nftables firewall # Set up the basic features of the nftables firewall
@ -744,8 +751,8 @@ if enable_networking:
flush ruleset flush ruleset
# Add the filter table and chains # Add the filter table and chains
add table inet filter add table inet filter
add chain inet filter forward {{ type filter hook forward priority 0; }} add chain inet filter forward {{type filter hook forward priority 0; }}
add chain inet filter input {{ type filter hook input priority 0; }} add chain inet filter input {{type filter hook input priority 0; }}
# Include static rules and network rules # Include static rules and network rules
include "{rulesdir}/static/*" include "{rulesdir}/static/*"
include "{rulesdir}/networks/*" include "{rulesdir}/networks/*"
@ -776,13 +783,13 @@ d_network = dict()
d_domain = dict() d_domain = dict()
d_osd = dict() d_osd = dict()
d_pool = dict() d_pool = dict()
d_volume = dict() # Dict of Dicts d_volume = dict() # Dict of Dicts
node_list = [] node_list = []
network_list = [] network_list = []
domain_list = [] domain_list = []
osd_list = [] osd_list = []
pool_list = [] pool_list = []
volume_list = dict() # Dict of Lists volume_list = dict() # Dict of Lists
if enable_networking: if enable_networking:
# Create an instance of the DNS Aggregator and Metadata API if we're a coordinator # Create an instance of the DNS Aggregator and Metadata API if we're a coordinator
@ -796,6 +803,7 @@ else:
dns_aggregator = None dns_aggregator = None
metadata_api = None metadata_api = None
# Node objects # Node objects
@zk_conn.ChildrenWatch('/nodes') @zk_conn.ChildrenWatch('/nodes')
def update_nodes(new_node_list): def update_nodes(new_node_list):
@ -803,12 +811,12 @@ def update_nodes(new_node_list):
# Add any missing nodes to the list # Add any missing nodes to the list
for node in new_node_list: for node in new_node_list:
if not node in node_list: if node not in node_list:
d_node[node] = NodeInstance.NodeInstance(node, myhostname, zk_conn, config, logger, d_node, d_network, d_domain, dns_aggregator, metadata_api) d_node[node] = NodeInstance.NodeInstance(node, myhostname, zk_conn, config, logger, d_node, d_network, d_domain, dns_aggregator, metadata_api)
# Remove any deleted nodes from the list # Remove any deleted nodes from the list
for node in node_list: for node in node_list:
if not node in new_node_list: if node not in new_node_list:
# Delete the object # Delete the object
del(d_node[node]) del(d_node[node])
@ -820,18 +828,21 @@ def update_nodes(new_node_list):
for node in d_node: for node in d_node:
d_node[node].update_node_list(d_node) d_node[node].update_node_list(d_node)
# Alias for our local node (passed to network and domain objects) # Alias for our local node (passed to network and domain objects)
this_node = d_node[myhostname] this_node = d_node[myhostname]
# Maintenance mode # Maintenance mode
@zk_conn.DataWatch('/maintenance') @zk_conn.DataWatch('/maintenance')
def set_maintenance(_maintenance, stat, event=''): def set_maintenance(_maintenance, stat, event=''):
global maintenance global maintenance
try: try:
maintenance = bool(strtobool(_maintenance.decode('ascii'))) maintenance = bool(strtobool(_maintenance.decode('ascii')))
except: except Exception:
maintenance = False maintenance = False
# Primary node # Primary node
@zk_conn.DataWatch('/primary_node') @zk_conn.DataWatch('/primary_node')
def update_primary(new_primary, stat, event=''): def update_primary(new_primary, stat, event=''):
@ -877,6 +888,7 @@ def update_primary(new_primary, stat, event=''):
for node in d_node: for node in d_node:
d_node[node].primary_node = new_primary d_node[node].primary_node = new_primary
if enable_networking: if enable_networking:
# Network objects # Network objects
@zk_conn.ChildrenWatch('/networks') @zk_conn.ChildrenWatch('/networks')
@ -885,13 +897,13 @@ if enable_networking:
# Add any missing networks to the list # Add any missing networks to the list
for network in new_network_list: for network in new_network_list:
if not network in network_list: if network not in network_list:
d_network[network] = VXNetworkInstance.VXNetworkInstance(network, zk_conn, config, logger, this_node, dns_aggregator) d_network[network] = VXNetworkInstance.VXNetworkInstance(network, zk_conn, config, logger, this_node, dns_aggregator)
if config['daemon_mode'] == 'coordinator' and d_network[network].nettype == 'managed': if config['daemon_mode'] == 'coordinator' and d_network[network].nettype == 'managed':
try: try:
dns_aggregator.add_network(d_network[network]) dns_aggregator.add_network(d_network[network])
except Exception as e: except Exception as e:
logger.out('Failed to create DNS Aggregator for network {}'.format(network), 'w') logger.out('Failed to create DNS Aggregator for network {}: {}'.format(network, e), 'w')
# Start primary functionality # Start primary functionality
if this_node.router_state == 'primary' and d_network[network].nettype == 'managed': if this_node.router_state == 'primary' and d_network[network].nettype == 'managed':
d_network[network].createGateways() d_network[network].createGateways()
@ -899,7 +911,7 @@ if enable_networking:
# Remove any deleted networks from the list # Remove any deleted networks from the list
for network in network_list: for network in network_list:
if not network in new_network_list: if network not in new_network_list:
if d_network[network].nettype == 'managed': if d_network[network].nettype == 'managed':
# Stop primary functionality # Stop primary functionality
if this_node.router_state == 'primary': if this_node.router_state == 'primary':
@ -923,7 +935,7 @@ if enable_networking:
if enable_hypervisor: if enable_hypervisor:
# VM command pipeline key # VM command pipeline key
@zk_conn.DataWatch('/cmd/domains') @zk_conn.DataWatch('/cmd/domains')
def cmd(data, stat, event=''): def cmd_domains(data, stat, event=''):
if data: if data:
VMInstance.run_command(zk_conn, logger, this_node, data.decode('ascii')) VMInstance.run_command(zk_conn, logger, this_node, data.decode('ascii'))
@ -934,12 +946,12 @@ if enable_hypervisor:
# Add any missing domains to the list # Add any missing domains to the list
for domain in new_domain_list: for domain in new_domain_list:
if not domain in domain_list: if domain not in domain_list:
d_domain[domain] = VMInstance.VMInstance(domain, zk_conn, config, logger, this_node) d_domain[domain] = VMInstance.VMInstance(domain, zk_conn, config, logger, this_node)
# Remove any deleted domains from the list # Remove any deleted domains from the list
for domain in domain_list: for domain in domain_list:
if not domain in new_domain_list: if domain not in new_domain_list:
# Delete the object # Delete the object
del(d_domain[domain]) del(d_domain[domain])
@ -954,7 +966,7 @@ if enable_hypervisor:
if enable_storage: if enable_storage:
# Ceph command pipeline key # Ceph command pipeline key
@zk_conn.DataWatch('/cmd/ceph') @zk_conn.DataWatch('/cmd/ceph')
def cmd(data, stat, event=''): def cmd_ceph(data, stat, event=''):
if data: if data:
CephInstance.run_command(zk_conn, logger, this_node, data.decode('ascii'), d_osd) CephInstance.run_command(zk_conn, logger, this_node, data.decode('ascii'), d_osd)
@ -965,12 +977,12 @@ if enable_storage:
# Add any missing OSDs to the list # Add any missing OSDs to the list
for osd in new_osd_list: for osd in new_osd_list:
if not osd in osd_list: if osd not in osd_list:
d_osd[osd] = CephInstance.CephOSDInstance(zk_conn, this_node, osd) d_osd[osd] = CephInstance.CephOSDInstance(zk_conn, this_node, osd)
# Remove any deleted OSDs from the list # Remove any deleted OSDs from the list
for osd in osd_list: for osd in osd_list:
if not osd in new_osd_list: if osd not in new_osd_list:
# Delete the object # Delete the object
del(d_osd[osd]) del(d_osd[osd])
@ -985,14 +997,14 @@ if enable_storage:
# Add any missing Pools to the list # Add any missing Pools to the list
for pool in new_pool_list: for pool in new_pool_list:
if not pool in pool_list: if pool not in pool_list:
d_pool[pool] = CephInstance.CephPoolInstance(zk_conn, this_node, pool) d_pool[pool] = CephInstance.CephPoolInstance(zk_conn, this_node, pool)
d_volume[pool] = dict() d_volume[pool] = dict()
volume_list[pool] = [] volume_list[pool] = []
# Remove any deleted Pools from the list # Remove any deleted Pools from the list
for pool in pool_list: for pool in pool_list:
if not pool in new_pool_list: if pool not in new_pool_list:
# Delete the object # Delete the object
del(d_pool[pool]) del(d_pool[pool])
@ -1008,12 +1020,12 @@ if enable_storage:
# Add any missing Volumes to the list # Add any missing Volumes to the list
for volume in new_volume_list: for volume in new_volume_list:
if not volume in volume_list[pool]: if volume not in volume_list[pool]:
d_volume[pool][volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume) d_volume[pool][volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)
# Remove any deleted Volumes from the list # Remove any deleted Volumes from the list
for volume in volume_list[pool]: for volume in volume_list[pool]:
if not volume in new_volume_list: if volume not in new_volume_list:
# Delete the object # Delete the object
del(d_volume[pool][volume]) del(d_volume[pool][volume])
@ -1021,6 +1033,7 @@ if enable_storage:
volume_list[pool] = new_volume_list volume_list[pool] = new_volume_list
logger.out('{}Volume list [{pool}]:{} {plist}'.format(fmt_blue, fmt_end, pool=pool, plist=' '.join(volume_list[pool])), state='i') logger.out('{}Volume list [{pool}]:{} {plist}'.format(fmt_blue, fmt_end, pool=pool, plist=' '.join(volume_list[pool])), state='i')
############################################################################### ###############################################################################
# PHASE 9 - Run the daemon # PHASE 9 - Run the daemon
############################################################################### ###############################################################################
@ -1044,7 +1057,7 @@ def collect_ceph_stats(queue):
logger.out("Getting health stats from monitor", state='d', prefix='ceph-thread') logger.out("Getting health stats from monitor", state='d', prefix='ceph-thread')
# Get Ceph cluster health for local status output # Get Ceph cluster health for local status output
command = { "prefix": "health", "format": "json" } command = {"prefix": "health", "format": "json"}
try: try:
health_status = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1]) health_status = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])
ceph_health = health_status['status'] ceph_health = health_status['status']
@ -1064,7 +1077,7 @@ def collect_ceph_stats(queue):
if debug: if debug:
logger.out("Set ceph health information in zookeeper (primary only)", state='d', prefix='ceph-thread') logger.out("Set ceph health information in zookeeper (primary only)", state='d', prefix='ceph-thread')
command = { "prefix": "status", "format": "pretty" } command = {"prefix": "status", "format": "pretty"}
ceph_status = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii') ceph_status = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii')
try: try:
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
@ -1078,7 +1091,7 @@ def collect_ceph_stats(queue):
logger.out("Set ceph rados df information in zookeeper (primary only)", state='d', prefix='ceph-thread') logger.out("Set ceph rados df information in zookeeper (primary only)", state='d', prefix='ceph-thread')
# Get rados df info # Get rados df info
command = { "prefix": "df", "format": "pretty" } command = {"prefix": "df", "format": "pretty"}
ceph_df = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii') ceph_df = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii')
try: try:
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
@ -1092,7 +1105,7 @@ def collect_ceph_stats(queue):
logger.out("Set pool information in zookeeper (primary only)", state='d', prefix='ceph-thread') logger.out("Set pool information in zookeeper (primary only)", state='d', prefix='ceph-thread')
# Get pool info # Get pool info
command = { "prefix": "df", "format": "json" } command = {"prefix": "df", "format": "json"}
try: try:
ceph_pool_df_raw = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])['pools'] ceph_pool_df_raw = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])['pools']
except Exception as e: except Exception as e:
@ -1143,14 +1156,14 @@ def collect_ceph_stats(queue):
'write_ops': pool['write_ops'], 'write_ops': pool['write_ops'],
'write_bytes': pool['write_bytes'] 'write_bytes': pool['write_bytes']
} }
# Write the pool data to Zookeeper # Write the pool data to Zookeeper
zkhandler.writedata(zk_conn, { zkhandler.writedata(zk_conn, {
'/ceph/pools/{}/stats'.format(pool['name']): str(json.dumps(pool_df)) '/ceph/pools/{}/stats'.format(pool['name']): str(json.dumps(pool_df))
}) })
except Exception as e: except Exception as e:
# One or more of the status commands timed out, just continue # One or more of the status commands timed out, just continue
logger.out('Failed to format and send pool data', state='w') logger.out('Failed to format and send pool data: {}'.format(e), state='w')
pass pass
# Only grab OSD stats if there are OSDs to grab (otherwise `ceph osd df` hangs) # Only grab OSD stats if there are OSDs to grab (otherwise `ceph osd df` hangs)
@ -1163,7 +1176,7 @@ def collect_ceph_stats(queue):
# Parse the dump data # Parse the dump data
osd_dump = dict() osd_dump = dict()
command = { "prefix": "osd dump", "format": "json" } command = {"prefix": "osd dump", "format": "json"}
try: try:
retcode, stdout, stderr = common.run_os_command('ceph osd dump --format json --connect-timeout 2', timeout=2) retcode, stdout, stderr = common.run_os_command('ceph osd dump --format json --connect-timeout 2', timeout=2)
osd_dump_raw = json.loads(stdout)['osds'] osd_dump_raw = json.loads(stdout)['osds']
@ -1189,7 +1202,7 @@ def collect_ceph_stats(queue):
osd_df = dict() osd_df = dict()
command = { "prefix": "osd df", "format": "json" } command = {"prefix": "osd df", "format": "json"}
try: try:
osd_df_raw = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])['nodes'] osd_df_raw = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])['nodes']
except Exception as e: except Exception as e:
@ -1216,7 +1229,7 @@ def collect_ceph_stats(queue):
osd_status = dict() osd_status = dict()
command = { "prefix": "osd status", "format": "pretty" } command = {"prefix": "osd status", "format": "pretty"}
try: try:
osd_status_raw = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii') osd_status_raw = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii')
except Exception as e: except Exception as e:
@ -1296,6 +1309,7 @@ def collect_ceph_stats(queue):
if debug: if debug:
logger.out("Thread finished", state='d', prefix='ceph-thread') logger.out("Thread finished", state='d', prefix='ceph-thread')
# State table for pretty stats # State table for pretty stats
libvirt_vm_states = { libvirt_vm_states = {
0: "NOSTATE", 0: "NOSTATE",
@ -1308,6 +1322,7 @@ libvirt_vm_states = {
7: "PMSUSPENDED" 7: "PMSUSPENDED"
} }
# VM stats update function # VM stats update function
def collect_vm_stats(queue): def collect_vm_stats(queue):
if debug: if debug:
@ -1318,7 +1333,7 @@ def collect_vm_stats(queue):
if debug: if debug:
logger.out("Connecting to libvirt", state='d', prefix='vm-thread') logger.out("Connecting to libvirt", state='d', prefix='vm-thread')
lv_conn = libvirt.open(libvirt_name) lv_conn = libvirt.open(libvirt_name)
if lv_conn == None: if lv_conn is None:
logger.out('Failed to open connection to "{}"'.format(libvirt_name), state='e') logger.out('Failed to open connection to "{}"'.format(libvirt_name), state='e')
return return
@ -1337,13 +1352,13 @@ def collect_vm_stats(queue):
memprov += instance.getmemory() memprov += instance.getmemory()
vcpualloc += instance.getvcpus() vcpualloc += instance.getvcpus()
if instance.getstate() == 'start' and instance.getnode() == this_node.name: if instance.getstate() == 'start' and instance.getnode() == this_node.name:
if instance.getdom() != None: if instance.getdom() is not None:
try: try:
if instance.getdom().state()[0] != libvirt.VIR_DOMAIN_RUNNING: if instance.getdom().state()[0] != libvirt.VIR_DOMAIN_RUNNING:
raise raise
except Exception as e: except Exception:
# Toggle a state "change" # Toggle a state "change"
zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(domain): instance.getstate() }) zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(domain): instance.getstate()})
elif instance.getnode() == this_node.name: elif instance.getnode() == this_node.name:
memprov += instance.getmemory() memprov += instance.getmemory()
@ -1371,7 +1386,7 @@ def collect_vm_stats(queue):
if debug: if debug:
try: try:
logger.out("Failed getting VM information for {}: {}".format(domain.name(), e), state='d', prefix='vm-thread') logger.out("Failed getting VM information for {}: {}".format(domain.name(), e), state='d', prefix='vm-thread')
except: except Exception:
pass pass
continue continue
@ -1451,6 +1466,7 @@ def collect_vm_stats(queue):
if debug: if debug:
logger.out("Thread finished", state='d', prefix='vm-thread') logger.out("Thread finished", state='d', prefix='vm-thread')
# Keepalive update function # Keepalive update function
def node_keepalive(): def node_keepalive():
if debug: if debug:
@ -1462,7 +1478,7 @@ def node_keepalive():
try: try:
if zkhandler.readdata(zk_conn, '/upstream_ip') != config['upstream_floating_ip']: if zkhandler.readdata(zk_conn, '/upstream_ip') != config['upstream_floating_ip']:
raise raise
except: except Exception:
zkhandler.writedata(zk_conn, {'/upstream_ip': config['upstream_floating_ip']}) zkhandler.writedata(zk_conn, {'/upstream_ip': config['upstream_floating_ip']})
# Get past state and update if needed # Get past state and update if needed
@ -1471,7 +1487,7 @@ def node_keepalive():
past_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(this_node.name)) past_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(this_node.name))
if past_state != 'run': if past_state != 'run':
this_node.daemon_state = 'run' this_node.daemon_state = 'run'
zkhandler.writedata(zk_conn, { '/nodes/{}/daemonstate'.format(this_node.name): 'run' }) zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(this_node.name): 'run'})
else: else:
this_node.daemon_state = 'run' this_node.daemon_state = 'run'
@ -1487,13 +1503,13 @@ def node_keepalive():
vm_thread_queue = Queue() vm_thread_queue = Queue()
vm_stats_thread = Thread(target=collect_vm_stats, args=(vm_thread_queue,), kwargs={}) vm_stats_thread = Thread(target=collect_vm_stats, args=(vm_thread_queue,), kwargs={})
vm_stats_thread.start() vm_stats_thread.start()
# Run Ceph status collection in separate thread for parallelization # Run Ceph status collection in separate thread for parallelization
if enable_storage: if enable_storage:
ceph_thread_queue = Queue() ceph_thread_queue = Queue()
ceph_stats_thread = Thread(target=collect_ceph_stats, args=(ceph_thread_queue,), kwargs={}) ceph_stats_thread = Thread(target=collect_ceph_stats, args=(ceph_thread_queue,), kwargs={})
ceph_stats_thread.start() ceph_stats_thread.start()
# Get node performance statistics # Get node performance statistics
this_node.memtotal = int(psutil.virtual_memory().total / 1024 / 1024) this_node.memtotal = int(psutil.virtual_memory().total / 1024 / 1024)
this_node.memused = int(psutil.virtual_memory().used / 1024 / 1024) this_node.memused = int(psutil.virtual_memory().used / 1024 / 1024)
@ -1517,7 +1533,7 @@ def node_keepalive():
this_node.memalloc = vm_thread_queue.get() this_node.memalloc = vm_thread_queue.get()
this_node.memprov = vm_thread_queue.get() this_node.memprov = vm_thread_queue.get()
this_node.vcpualloc = vm_thread_queue.get() this_node.vcpualloc = vm_thread_queue.get()
except: except Exception:
pass pass
else: else:
this_node.domains_count = 0 this_node.domains_count = 0
@ -1530,7 +1546,7 @@ def node_keepalive():
ceph_health_colour = ceph_thread_queue.get() ceph_health_colour = ceph_thread_queue.get()
ceph_health = ceph_thread_queue.get() ceph_health = ceph_thread_queue.get()
osds_this_node = ceph_thread_queue.get() osds_this_node = ceph_thread_queue.get()
except: except Exception:
ceph_health_colour = fmt_cyan ceph_health_colour = fmt_cyan
ceph_health = 'UNKNOWN' ceph_health = 'UNKNOWN'
osds_this_node = '?' osds_this_node = '?'
@ -1552,7 +1568,7 @@ def node_keepalive():
'/nodes/{}/runningdomains'.format(this_node.name): ' '.join(this_node.domain_list), '/nodes/{}/runningdomains'.format(this_node.name): ' '.join(this_node.domain_list),
'/nodes/{}/keepalive'.format(this_node.name): str(keepalive_time) '/nodes/{}/keepalive'.format(this_node.name): str(keepalive_time)
}) })
except: except Exception:
logger.out('Failed to set keepalive data', state='e') logger.out('Failed to set keepalive data', state='e')
return return
@ -1621,17 +1637,15 @@ def node_keepalive():
for node_name in d_node: for node_name in d_node:
try: try:
node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name)) node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
node_domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node_name))
node_keepalive = int(zkhandler.readdata(zk_conn, '/nodes/{}/keepalive'.format(node_name))) node_keepalive = int(zkhandler.readdata(zk_conn, '/nodes/{}/keepalive'.format(node_name)))
except: except Exception:
node_daemon_state = 'unknown' node_daemon_state = 'unknown'
node_domain_state = 'unknown'
node_keepalive = 0 node_keepalive = 0
# Handle deadtime and fencng if needed # Handle deadtime and fencng if needed
# (A node is considered dead when its keepalive timer is >6*keepalive_interval seconds # (A node is considered dead when its keepalive timer is >6*keepalive_interval seconds
# out-of-date while in 'start' state) # out-of-date while in 'start' state)
node_deadtime = int(time.time()) - ( int(config['keepalive_interval']) * int(config['fence_intervals']) ) node_deadtime = int(time.time()) - (int(config['keepalive_interval']) * int(config['fence_intervals']))
if node_keepalive < node_deadtime and node_daemon_state == 'run': if node_keepalive < node_deadtime and node_daemon_state == 'run':
logger.out('Node {} seems dead - starting monitor for fencing'.format(node_name), state='w') logger.out('Node {} seems dead - starting monitor for fencing'.format(node_name), state='w')
zk_lock = zkhandler.writelock(zk_conn, '/nodes/{}/daemonstate'.format(node_name)) zk_lock = zkhandler.writelock(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
@ -1642,11 +1656,12 @@ def node_keepalive():
fence_thread = Thread(target=fencing.fenceNode, args=(node_name, zk_conn, config, logger), kwargs={}) fence_thread = Thread(target=fencing.fenceNode, args=(node_name, zk_conn, config, logger), kwargs={})
fence_thread.start() fence_thread.start()
# Write the updated data after we start the fence thread # Write the updated data after we start the fence thread
zkhandler.writedata(zk_conn, { '/nodes/{}/daemonstate'.format(node_name): 'dead' }) zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(node_name): 'dead'})
if debug: if debug:
logger.out("Keepalive finished", state='d', prefix='main-thread') logger.out("Keepalive finished", state='d', prefix='main-thread')
# Start keepalive thread # Start keepalive thread
update_timer = startKeepaliveTimer() update_timer = startKeepaliveTimer()
@ -1654,5 +1669,5 @@ update_timer = startKeepaliveTimer()
while True: while True:
try: try:
time.sleep(1) time.sleep(1)
except: except Exception:
break break

View File

@ -32,6 +32,7 @@ from psycopg2.extras import RealDictCursor
import daemon_lib.vm as pvc_vm import daemon_lib.vm as pvc_vm
import daemon_lib.network as pvc_network import daemon_lib.network as pvc_network
class MetadataAPIInstance(object): class MetadataAPIInstance(object):
mdapi = flask.Flask(__name__) mdapi = flask.Flask(__name__)
@ -49,33 +50,33 @@ class MetadataAPIInstance(object):
@self.mdapi.route('/', methods=['GET']) @self.mdapi.route('/', methods=['GET'])
def api_root(): def api_root():
return flask.jsonify({"message": "PVC Provisioner Metadata API version 1"}), 209 return flask.jsonify({"message": "PVC Provisioner Metadata API version 1"}), 209
@self.mdapi.route('/<version>/meta-data/', methods=['GET']) @self.mdapi.route('/<version>/meta-data/', methods=['GET'])
def api_metadata_root(version): def api_metadata_root(version):
metadata = """instance-id\nname\nprofile""" metadata = """instance-id\nname\nprofile"""
return metadata, 200 return metadata, 200
@self.mdapi.route('/<version>/meta-data/instance-id', methods=['GET']) @self.mdapi.route('/<version>/meta-data/instance-id', methods=['GET'])
def api_metadata_instanceid(version): def api_metadata_instanceid(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR'] source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
vm_details = self.get_vm_details(source_address) vm_details = self.get_vm_details(source_address)
instance_id = vm_details.get('uuid', None) instance_id = vm_details.get('uuid', None)
return instance_id, 200 return instance_id, 200
@self.mdapi.route('/<version>/meta-data/name', methods=['GET']) @self.mdapi.route('/<version>/meta-data/name', methods=['GET'])
def api_metadata_hostname(version): def api_metadata_hostname(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR'] source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
vm_details = self.get_vm_details(source_address) vm_details = self.get_vm_details(source_address)
vm_name = vm_details.get('name', None) vm_name = vm_details.get('name', None)
return vm_name, 200 return vm_name, 200
@self.mdapi.route('/<version>/meta-data/profile', methods=['GET']) @self.mdapi.route('/<version>/meta-data/profile', methods=['GET'])
def api_metadata_profile(version): def api_metadata_profile(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR'] source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
vm_details = self.get_vm_details(source_address) vm_details = self.get_vm_details(source_address)
vm_profile = vm_details.get('profile', None) vm_profile = vm_details.get('profile', None)
return vm_profile, 200 return vm_profile, 200
@self.mdapi.route('/<version>/user-data', methods=['GET']) @self.mdapi.route('/<version>/user-data', methods=['GET'])
def api_userdata(version): def api_userdata(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR'] source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
@ -88,7 +89,7 @@ class MetadataAPIInstance(object):
else: else:
userdata = None userdata = None
return flask.Response(userdata) return flask.Response(userdata)
def launch_wsgi(self): def launch_wsgi(self):
try: try:
self.md_http_server = gevent.pywsgi.WSGIServer( self.md_http_server = gevent.pywsgi.WSGIServer(
@ -142,7 +143,7 @@ class MetadataAPIInstance(object):
# Obtain a list of templates # Obtain a list of templates
def get_profile_userdata(self, vm_profile): def get_profile_userdata(self, vm_profile):
query = """SELECT userdata.userdata FROM profile query = """SELECT userdata.userdata FROM profile
JOIN userdata ON profile.userdata = userdata.id JOIN userdata ON profile.userdata = userdata.id
WHERE profile.name = %s; WHERE profile.name = %s;
""" """
@ -159,7 +160,7 @@ class MetadataAPIInstance(object):
def get_vm_details(self, source_address): def get_vm_details(self, source_address):
# Start connection to Zookeeper # Start connection to Zookeeper
_discard, networks = pvc_network.get_list(self.zk_conn, None) _discard, networks = pvc_network.get_list(self.zk_conn, None)
# Figure out which server this is via the DHCP address # Figure out which server this is via the DHCP address
host_information = dict() host_information = dict()
networks_managed = (x for x in networks if x.get('type') == 'managed') networks_managed = (x for x in networks if x.get('type') == 'managed')
@ -170,26 +171,21 @@ class MetadataAPIInstance(object):
try: try:
if information.get('ip4_address', None) == source_address: if information.get('ip4_address', None) == source_address:
host_information = information host_information = information
except: except Exception:
pass pass
# Get our real information on the host; now we can start querying about it # Get our real information on the host; now we can start querying about it
client_hostname = host_information.get('hostname', None)
client_macaddr = host_information.get('mac_address', None) client_macaddr = host_information.get('mac_address', None)
client_ipaddr = host_information.get('ip4_address', None)
# Find the VM with that MAC address - we can't assume that the hostname is actually right # Find the VM with that MAC address - we can't assume that the hostname is actually right
_discard, vm_list = pvc_vm.get_list(self.zk_conn, None, None, None) _discard, vm_list = pvc_vm.get_list(self.zk_conn, None, None, None)
vm_name = None
vm_details = dict() vm_details = dict()
for vm in vm_list: for vm in vm_list:
try: try:
for network in vm.get('networks'): for network in vm.get('networks'):
if network.get('mac', None) == client_macaddr: if network.get('mac', None) == client_macaddr:
vm_name = vm.get('name')
vm_details = vm vm_details = vm
except: except Exception:
pass pass
return vm_details return vm_details

View File

@ -24,10 +24,10 @@ import time
from threading import Thread from threading import Thread
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler import pvcnoded.zkhandler as zkhandler
import pvcnoded.common as common import pvcnoded.common as common
class NodeInstance(object): class NodeInstance(object):
# Initialization function # Initialization function
def __init__(self, name, this_node, zk_conn, config, logger, d_node, d_network, d_domain, dns_aggregator, metadata_api): def __init__(self, name, this_node, zk_conn, config, logger, d_node, d_network, d_domain, dns_aggregator, metadata_api):
@ -324,12 +324,12 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase A', state='i') self.logger.out('Acquiring write lock for synchronization phase A', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase A', state='o') self.logger.out('Acquired write lock for synchronization phase A', state='o')
time.sleep(1) # Time for reader to acquire the lock time.sleep(1) # Time fir reader to acquire the lock
self.logger.out('Releasing write lock for synchronization phase A', state='i') self.logger.out('Releasing write lock for synchronization phase A', state='i')
zkhandler.writedata(self.zk_conn, {'/locks/primary_node': ''}) zkhandler.writedata(self.zk_conn, {'/locks/primary_node': ''})
lock.release() lock.release()
self.logger.out('Released write lock for synchronization phase A', state='o') self.logger.out('Released write lock for synchronization phase A', state='o')
time.sleep(0.1) # Time for new writer to acquire the lock time.sleep(0.1) # Time fir new writer to acquire the lock
# Synchronize nodes B (I am reader) # Synchronize nodes B (I am reader)
lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node') lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node')
@ -345,7 +345,7 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase C', state='i') self.logger.out('Acquiring write lock for synchronization phase C', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase C', state='o') self.logger.out('Acquired write lock for synchronization phase C', state='o')
time.sleep(0.5) # Time for reader to acquire the lock time.sleep(0.5) # Time fir reader to acquire the lock
# 1. Add Upstream floating IP # 1. Add Upstream floating IP
self.logger.out( self.logger.out(
'Creating floating upstream IP {}/{} on interface {}'.format( 'Creating floating upstream IP {}/{} on interface {}'.format(
@ -366,7 +366,7 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase D', state='i') self.logger.out('Acquiring write lock for synchronization phase D', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase D', state='o') self.logger.out('Acquired write lock for synchronization phase D', state='o')
time.sleep(0.2) # Time for reader to acquire the lock time.sleep(0.2) # Time fir reader to acquire the lock
# 2. Add Cluster floating IP # 2. Add Cluster floating IP
self.logger.out( self.logger.out(
'Creating floating management IP {}/{} on interface {}'.format( 'Creating floating management IP {}/{} on interface {}'.format(
@ -387,7 +387,7 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase E', state='i') self.logger.out('Acquiring write lock for synchronization phase E', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase E', state='o') self.logger.out('Acquired write lock for synchronization phase E', state='o')
time.sleep(0.2) # Time for reader to acquire the lock time.sleep(0.2) # Time fir reader to acquire the lock
# 3. Add Metadata link-local IP # 3. Add Metadata link-local IP
self.logger.out( self.logger.out(
'Creating Metadata link-local IP {}/{} on interface {}'.format( 'Creating Metadata link-local IP {}/{} on interface {}'.format(
@ -408,7 +408,7 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase F', state='i') self.logger.out('Acquiring write lock for synchronization phase F', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase F', state='o') self.logger.out('Acquired write lock for synchronization phase F', state='o')
time.sleep(0.2) # Time for reader to acquire the lock time.sleep(0.2) # Time fir reader to acquire the lock
# 4. Add gateway IPs # 4. Add gateway IPs
for network in self.d_network: for network in self.d_network:
self.d_network[network].createGateways() self.d_network[network].createGateways()
@ -422,7 +422,7 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase G', state='i') self.logger.out('Acquiring write lock for synchronization phase G', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase G', state='o') self.logger.out('Acquired write lock for synchronization phase G', state='o')
time.sleep(0.2) # Time for reader to acquire the lock time.sleep(0.2) # Time fir reader to acquire the lock
# 5. Transition Patroni primary # 5. Transition Patroni primary
self.logger.out('Setting Patroni leader to this node', state='i') self.logger.out('Setting Patroni leader to this node', state='i')
tick = 1 tick = 1
@ -499,7 +499,7 @@ class NodeInstance(object):
""" """
Relinquish primary coordinator status to a peer node Relinquish primary coordinator status to a peer node
""" """
time.sleep(0.2) # Initial delay for the first writer to grab the lock time.sleep(0.2) # Initial delay for the first writer to grab the lock
# Synchronize nodes A (I am reader) # Synchronize nodes A (I am reader)
lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node') lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node')
@ -515,7 +515,7 @@ class NodeInstance(object):
self.logger.out('Acquiring write lock for synchronization phase B', state='i') self.logger.out('Acquiring write lock for synchronization phase B', state='i')
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase B', state='o') self.logger.out('Acquired write lock for synchronization phase B', state='o')
time.sleep(0.2) # Time for reader to acquire the lock time.sleep(0.2) # Time fir reader to acquire the lock
# 1. Stop DNS aggregator # 1. Stop DNS aggregator
self.dns_aggregator.stop_aggregator() self.dns_aggregator.stop_aggregator()
# 2. Stop DHCP servers # 2. Stop DHCP servers
@ -531,7 +531,7 @@ class NodeInstance(object):
common.run_os_command("systemctl stop pvcapid.service") common.run_os_command("systemctl stop pvcapid.service")
# 4. Stop metadata API # 4. Stop metadata API
self.metadata_api.stop() self.metadata_api.stop()
time.sleep(0.1) # Time for new writer to acquire the lock time.sleep(0.1) # Time fir new writer to acquire the lock
# Synchronize nodes C (I am reader) # Synchronize nodes C (I am reader)
lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node') lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node')
@ -606,9 +606,9 @@ class NodeInstance(object):
lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node') lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node')
self.logger.out('Acquiring read lock for synchronization phase G', state='i') self.logger.out('Acquiring read lock for synchronization phase G', state='i')
try: try:
lock.acquire(timeout=60) # Don't wait forever and completely block us lock.acquire(timeout=60) # Don't wait forever and completely block us
self.logger.out('Acquired read lock for synchronization phase G', state='o') self.logger.out('Acquired read lock for synchronization phase G', state='o')
except: except Exception:
pass pass
self.logger.out('Releasing read lock for synchronization phase G', state='i') self.logger.out('Releasing read lock for synchronization phase G', state='i')
lock.release() lock.release()
@ -647,8 +647,8 @@ class NodeInstance(object):
if target_node is None: if target_node is None:
self.logger.out('Failed to find migration target for VM "{}"; shutting down and setting autostart flag'.format(dom_uuid), state='e') self.logger.out('Failed to find migration target for VM "{}"; shutting down and setting autostart flag'.format(dom_uuid), state='e')
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(dom_uuid): 'shutdown' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(dom_uuid): 'shutdown'})
zkhandler.writedata(self.zk_conn, { '/domains/{}/node_autostart'.format(dom_uuid): 'True' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/node_autostart'.format(dom_uuid): 'True'})
else: else:
self.logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i') self.logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
zkhandler.writedata(self.zk_conn, { zkhandler.writedata(self.zk_conn, {
@ -666,8 +666,8 @@ class NodeInstance(object):
break break
time.sleep(0.2) time.sleep(0.2)
zkhandler.writedata(self.zk_conn, { '/nodes/{}/runningdomains'.format(self.name): '' }) zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.name): ''})
zkhandler.writedata(self.zk_conn, { '/nodes/{}/domainstate'.format(self.name): 'flushed' }) zkhandler.writedata(self.zk_conn, {'/nodes/{}/domainstate'.format(self.name): 'flushed'})
self.flush_thread = None self.flush_thread = None
self.flush_stopper = False self.flush_stopper = False
return return
@ -698,7 +698,7 @@ class NodeInstance(object):
try: try:
last_node = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(dom_uuid)) last_node = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(dom_uuid))
except: except Exception:
continue continue
if last_node != self.name: if last_node != self.name:
@ -715,7 +715,7 @@ class NodeInstance(object):
while zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(dom_uuid)) in ['migrate', 'unmigrate', 'shutdown']: while zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(dom_uuid)) in ['migrate', 'unmigrate', 'shutdown']:
time.sleep(0.1) time.sleep(0.1)
zkhandler.writedata(self.zk_conn, { '/nodes/{}/domainstate'.format(self.name): 'ready' }) zkhandler.writedata(self.zk_conn, {'/nodes/{}/domainstate'.format(self.name): 'ready'})
self.flush_thread = None self.flush_thread = None
self.flush_stopper = False self.flush_stopper = False
return return

View File

@ -21,16 +21,14 @@
############################################################################### ###############################################################################
import os import os
import uuid
import time import time
import libvirt
from threading import Thread, Event from threading import Thread, Event
from collections import deque from collections import deque
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler import pvcnoded.zkhandler as zkhandler
class VMConsoleWatcherInstance(object): class VMConsoleWatcherInstance(object):
# Initialization function # Initialization function
def __init__(self, domuuid, domname, zk_conn, config, logger, this_node): def __init__(self, domuuid, domname, zk_conn, config, logger, this_node):
@ -89,7 +87,7 @@ class VMConsoleWatcherInstance(object):
self.fetch_lines() self.fetch_lines()
# Update Zookeeper with the new loglines if they changed # Update Zookeeper with the new loglines if they changed
if self.loglines != self.last_loglines: if self.loglines != self.last_loglines:
zkhandler.writedata(self.zk_conn, { '/domains/{}/consolelog'.format(self.domuuid): self.loglines }) zkhandler.writedata(self.zk_conn, {'/domains/{}/consolelog'.format(self.domuuid): self.loglines})
self.last_loglines = self.loglines self.last_loglines = self.loglines
def fetch_lines(self): def fetch_lines(self):

View File

@ -27,7 +27,6 @@ import json
from threading import Thread from threading import Thread
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler import pvcnoded.zkhandler as zkhandler
import pvcnoded.common as common import pvcnoded.common as common
@ -35,6 +34,7 @@ import pvcnoded.VMConsoleWatcherInstance as VMConsoleWatcherInstance
import daemon_lib.common as daemon_common import daemon_lib.common as daemon_common
def flush_locks(zk_conn, logger, dom_uuid): def flush_locks(zk_conn, logger, dom_uuid):
logger.out('Flushing RBD locks for VM "{}"'.format(dom_uuid), state='i') logger.out('Flushing RBD locks for VM "{}"'.format(dom_uuid), state='i')
# Get the list of RBD images # Get the list of RBD images
@ -65,6 +65,7 @@ def flush_locks(zk_conn, logger, dom_uuid):
return True return True
# Primary command function # Primary command function
def run_command(zk_conn, logger, this_node, data): def run_command(zk_conn, logger, this_node, data):
# Get the command and args # Get the command and args
@ -93,6 +94,7 @@ def run_command(zk_conn, logger, this_node, data):
# Wait 1 seconds before we free the lock, to ensure the client hits the lock # Wait 1 seconds before we free the lock, to ensure the client hits the lock
time.sleep(1) time.sleep(1)
class VMInstance(object): class VMInstance(object):
# Initialization function # Initialization function
def __init__(self, domuuid, zk_conn, config, logger, this_node): def __init__(self, domuuid, zk_conn, config, logger, this_node):
@ -112,11 +114,11 @@ class VMInstance(object):
self.last_lastnode = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(self.domuuid)) self.last_lastnode = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(self.domuuid))
try: try:
self.pinpolicy = zkhandler.readdata(self.zk_conn, '/domains/{}/pinpolicy'.format(self.domuuid)) self.pinpolicy = zkhandler.readdata(self.zk_conn, '/domains/{}/pinpolicy'.format(self.domuuid))
except: except Exception:
self.pinpolicy = "none" self.pinpolicy = "none"
try: try:
self.migration_method = zkhandler.readdata(self.zk_conn, '/domains/{}/migration_method'.format(self.domuuid)) self.migration_method = zkhandler.readdata(self.zk_conn, '/domains/{}/migration_method'.format(self.domuuid))
except: except Exception:
self.migration_method = 'none' self.migration_method = 'none'
# These will all be set later # These will all be set later
@ -166,7 +168,7 @@ class VMInstance(object):
else: else:
domain_information = daemon_common.getInformationFromXML(self.zk_conn, self.domuuid) domain_information = daemon_common.getInformationFromXML(self.zk_conn, self.domuuid)
memory = int(domain_information['memory']) memory = int(domain_information['memory'])
except: except Exception:
memory = 0 memory = 0
return memory return memory
@ -174,19 +176,19 @@ class VMInstance(object):
def getvcpus(self): def getvcpus(self):
try: try:
vcpus = int(self.dom.info()[3]) vcpus = int(self.dom.info()[3])
except: except Exception:
vcpus = 0 vcpus = 0
return vcpus return vcpus
# Manage local node domain_list # Manage local node domain_list
def addDomainToList(self): def addDomainToList(self):
if not self.domuuid in self.this_node.domain_list: if self.domuuid not in self.this_node.domain_list:
try: try:
# Add the domain to the domain_list array # Add the domain to the domain_list array
self.this_node.domain_list.append(self.domuuid) self.this_node.domain_list.append(self.domuuid)
# Push the change up to Zookeeper # Push the change up to Zookeeper
zkhandler.writedata(self.zk_conn, { '/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list) }) zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list)})
except Exception as e: except Exception as e:
self.logger.out('Error adding domain to list: {}'.format(e), state='e') self.logger.out('Error adding domain to list: {}'.format(e), state='e')
@ -196,7 +198,7 @@ class VMInstance(object):
# Remove the domain from the domain_list array # Remove the domain from the domain_list array
self.this_node.domain_list.remove(self.domuuid) self.this_node.domain_list.remove(self.domuuid)
# Push the change up to Zookeeper # Push the change up to Zookeeper
zkhandler.writedata(self.zk_conn, { '/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list) }) zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list)})
except Exception as e: except Exception as e:
self.logger.out('Error removing domain from list: {}'.format(e), state='e') self.logger.out('Error removing domain from list: {}'.format(e), state='e')
@ -211,7 +213,7 @@ class VMInstance(object):
# Start up a new Libvirt connection # Start up a new Libvirt connection
libvirt_name = "qemu:///system" libvirt_name = "qemu:///system"
lv_conn = libvirt.open(libvirt_name) lv_conn = libvirt.open(libvirt_name)
if lv_conn == None: if lv_conn is None:
self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}'.format(self.domuuid))
self.instart = False self.instart = False
return return
@ -220,13 +222,13 @@ class VMInstance(object):
try: try:
self.dom = self.lookupByUUID(self.domuuid) self.dom = self.lookupByUUID(self.domuuid)
curstate = self.dom.state()[0] curstate = self.dom.state()[0]
except: except Exception:
curstate = 'notstart' curstate = 'notstart'
if curstate == libvirt.VIR_DOMAIN_RUNNING: if curstate == libvirt.VIR_DOMAIN_RUNNING:
# If it is running just update the model # If it is running just update the model
self.addDomainToList() self.addDomainToList()
zkhandler.writedata(self.zk_conn, { '/domains/{}/failedreason'.format(self.domuuid): '' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): ''})
else: else:
# Or try to create it # Or try to create it
try: try:
@ -236,11 +238,11 @@ class VMInstance(object):
self.addDomainToList() self.addDomainToList()
self.logger.out('Successfully started VM', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Successfully started VM', state='o', prefix='Domain {}'.format(self.domuuid))
self.dom = dom self.dom = dom
zkhandler.writedata(self.zk_conn, { '/domains/{}/failedreason'.format(self.domuuid): '' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): ''})
except libvirt.libvirtError as e: except libvirt.libvirtError as e:
self.logger.out('Failed to create VM', state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Failed to create VM', state='e', prefix='Domain {}'.format(self.domuuid))
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'fail' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'fail'})
zkhandler.writedata(self.zk_conn, { '/domains/{}/failedreason'.format(self.domuuid): str(e) }) zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): str(e)})
self.dom = None self.dom = None
lv_conn.close() lv_conn.close()
@ -255,7 +257,7 @@ class VMInstance(object):
# Start up a new Libvirt connection # Start up a new Libvirt connection
libvirt_name = "qemu:///system" libvirt_name = "qemu:///system"
lv_conn = libvirt.open(libvirt_name) lv_conn = libvirt.open(libvirt_name)
if lv_conn == None: if lv_conn is None:
self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}'.format(self.domuuid))
self.inrestart = False self.inrestart = False
return return
@ -265,7 +267,7 @@ class VMInstance(object):
self.start_vm() self.start_vm()
self.addDomainToList() self.addDomainToList()
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'start' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
lv_conn.close() lv_conn.close()
self.inrestart = False self.inrestart = False
@ -295,8 +297,8 @@ class VMInstance(object):
self.logger.out('Failed to stop VM', state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Failed to stop VM', state='e', prefix='Domain {}'.format(self.domuuid))
self.removeDomainFromList() self.removeDomainFromList()
if self.inrestart == False: if self.inrestart is False:
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'stop' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop'})
self.logger.out('Successfully stopped VM', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Successfully stopped VM', state='o', prefix='Domain {}'.format(self.domuuid))
self.dom = None self.dom = None
@ -325,12 +327,12 @@ class VMInstance(object):
try: try:
lvdomstate = self.dom.state()[0] lvdomstate = self.dom.state()[0]
except: except Exception:
lvdomstate = None lvdomstate = None
if lvdomstate != libvirt.VIR_DOMAIN_RUNNING: if lvdomstate != libvirt.VIR_DOMAIN_RUNNING:
self.removeDomainFromList() self.removeDomainFromList()
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'stop' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop'})
self.logger.out('Successfully shutdown VM', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Successfully shutdown VM', state='o', prefix='Domain {}'.format(self.domuuid))
self.dom = None self.dom = None
# Stop the log watcher # Stop the log watcher
@ -339,7 +341,7 @@ class VMInstance(object):
if tick >= self.config['vm_shutdown_timeout']: if tick >= self.config['vm_shutdown_timeout']:
self.logger.out('Shutdown timeout ({}s) expired, forcing off'.format(self.config['vm_shutdown_timeout']), state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Shutdown timeout ({}s) expired, forcing off'.format(self.config['vm_shutdown_timeout']), state='e', prefix='Domain {}'.format(self.domuuid))
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'stop' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop'})
break break
self.inshutdown = False self.inshutdown = False
@ -350,7 +352,7 @@ class VMInstance(object):
if self.inrestart: if self.inrestart:
# Wait to prevent race conditions # Wait to prevent race conditions
time.sleep(1) time.sleep(1)
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'start' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
# Migrate the VM to a target host # Migrate the VM to a target host
def migrate_vm(self, force_live=False, force_shutdown=False): def migrate_vm(self, force_live=False, force_shutdown=False):
@ -387,7 +389,7 @@ class VMInstance(object):
migrate_lock_node.acquire() migrate_lock_node.acquire()
migrate_lock_state.acquire() migrate_lock_state.acquire()
time.sleep(0.2) # Initial delay for the first writer to grab the lock time.sleep(0.2) # Initial delay for the first writer to grab the lock
# Don't try to migrate a node to itself, set back to start # Don't try to migrate a node to itself, set back to start
if self.node == self.lastnode or self.node == self.this_node.name: if self.node == self.lastnode or self.node == self.this_node.name:
@ -422,7 +424,7 @@ class VMInstance(object):
self.logger.out('Acquiring write lock for synchronization phase B', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquiring write lock for synchronization phase B', state='i', prefix='Domain {}'.format(self.domuuid))
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase B', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquired write lock for synchronization phase B', state='o', prefix='Domain {}'.format(self.domuuid))
time.sleep(0.5) # Time for reader to acquire the lock time.sleep(0.5) # Time fir reader to acquire the lock
def migrate_live(): def migrate_live():
self.logger.out('Setting up live migration', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Setting up live migration', state='i', prefix='Domain {}'.format(self.domuuid))
@ -435,7 +437,7 @@ class VMInstance(object):
dest_lv_conn = libvirt.open(dest_lv) dest_lv_conn = libvirt.open(dest_lv)
if not dest_lv_conn: if not dest_lv_conn:
raise raise
except: except Exception:
self.logger.out('Failed to open connection to {}; aborting live migration.'.format(dest_lv), state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Failed to open connection to {}; aborting live migration.'.format(dest_lv), state='e', prefix='Domain {}'.format(self.domuuid))
return False return False
@ -459,7 +461,7 @@ class VMInstance(object):
def migrate_shutdown(): def migrate_shutdown():
self.logger.out('Shutting down VM for offline migration', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Shutting down VM for offline migration', state='i', prefix='Domain {}'.format(self.domuuid))
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'shutdown' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'shutdown'})
while zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid)) != 'stop': while zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid)) != 'stop':
time.sleep(0.5) time.sleep(0.5)
return True return True
@ -510,10 +512,10 @@ class VMInstance(object):
self.logger.out('Acquiring write lock for synchronization phase C', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquiring write lock for synchronization phase C', state='i', prefix='Domain {}'.format(self.domuuid))
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase C', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquired write lock for synchronization phase C', state='o', prefix='Domain {}'.format(self.domuuid))
time.sleep(0.5) # Time for reader to acquire the lock time.sleep(0.5) # Time fir reader to acquire the lock
if do_migrate_shutdown: if do_migrate_shutdown:
migrate_shutdown_result = migrate_shutdown() migrate_shutdown()
self.logger.out('Releasing write lock for synchronization phase C', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Releasing write lock for synchronization phase C', state='i', prefix='Domain {}'.format(self.domuuid))
lock.release() lock.release()
@ -548,23 +550,22 @@ class VMInstance(object):
time.sleep(0.1) time.sleep(0.1)
self.inreceive = True self.inreceive = True
live_receive = True
self.logger.out('Receiving VM migration from node "{}"'.format(self.node), state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Receiving VM migration from node "{}"'.format(self.node), state='i', prefix='Domain {}'.format(self.domuuid))
# Ensure our lock key is populated # Ensure our lock key is populated
zkhandler.writedata(self.zk_conn, { '/locks/domain_migrate/{}'.format(self.domuuid): self.domuuid }) zkhandler.writedata(self.zk_conn, {'/locks/domain_migrate/{}'.format(self.domuuid): self.domuuid})
# Synchronize nodes A (I am writer) # Synchronize nodes A (I am writer)
lock = zkhandler.writelock(self.zk_conn, '/locks/domain_migrate/{}'.format(self.domuuid)) lock = zkhandler.writelock(self.zk_conn, '/locks/domain_migrate/{}'.format(self.domuuid))
self.logger.out('Acquiring write lock for synchronization phase A', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquiring write lock for synchronization phase A', state='i', prefix='Domain {}'.format(self.domuuid))
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase A', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquired write lock for synchronization phase A', state='o', prefix='Domain {}'.format(self.domuuid))
time.sleep(0.5) # Time for reader to acquire the lock time.sleep(0.5) # Time fir reader to acquire the lock
self.logger.out('Releasing write lock for synchronization phase A', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Releasing write lock for synchronization phase A', state='i', prefix='Domain {}'.format(self.domuuid))
lock.release() lock.release()
self.logger.out('Released write lock for synchronization phase A', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Released write lock for synchronization phase A', state='o', prefix='Domain {}'.format(self.domuuid))
time.sleep(0.1) # Time for new writer to acquire the lock time.sleep(0.1) # Time fir new writer to acquire the lock
# Synchronize nodes B (I am reader) # Synchronize nodes B (I am reader)
lock = zkhandler.readlock(self.zk_conn, '/locks/domain_migrate/{}'.format(self.domuuid)) lock = zkhandler.readlock(self.zk_conn, '/locks/domain_migrate/{}'.format(self.domuuid))
@ -594,7 +595,7 @@ class VMInstance(object):
self.logger.out('Acquiring write lock for synchronization phase D', state='i', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquiring write lock for synchronization phase D', state='i', prefix='Domain {}'.format(self.domuuid))
lock.acquire() lock.acquire()
self.logger.out('Acquired write lock for synchronization phase D', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Acquired write lock for synchronization phase D', state='o', prefix='Domain {}'.format(self.domuuid))
time.sleep(0.5) # Time for reader to acquire the lock time.sleep(0.5) # Time fir reader to acquire the lock
self.state = zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid)) self.state = zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid))
self.dom = self.lookupByUUID(self.domuuid) self.dom = self.lookupByUUID(self.domuuid)
@ -603,11 +604,11 @@ class VMInstance(object):
if lvdomstate == libvirt.VIR_DOMAIN_RUNNING: if lvdomstate == libvirt.VIR_DOMAIN_RUNNING:
# VM has been received and started # VM has been received and started
self.addDomainToList() self.addDomainToList()
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'start' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
self.logger.out('Successfully received migrated VM', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Successfully received migrated VM', state='o', prefix='Domain {}'.format(self.domuuid))
else: else:
# The receive somehow failed # The receive somehow failed
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'fail' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'fail'})
else: else:
if self.node == self.this_node.name: if self.node == self.this_node.name:
if self.state in ['start']: if self.state in ['start']:
@ -615,7 +616,7 @@ class VMInstance(object):
self.logger.out('Receive aborted via state change', state='w', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Receive aborted via state change', state='w', prefix='Domain {}'.format(self.domuuid))
elif self.state in ['stop']: elif self.state in ['stop']:
# The send was shutdown-based # The send was shutdown-based
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'start' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
else: else:
# The send failed or was aborted # The send failed or was aborted
self.logger.out('Migrate aborted or failed; VM in state {}'.format(self.state), state='w', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Migrate aborted or failed; VM in state {}'.format(self.state), state='w', prefix='Domain {}'.format(self.domuuid))
@ -624,7 +625,7 @@ class VMInstance(object):
lock.release() lock.release()
self.logger.out('Released write lock for synchronization phase D', state='o', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Released write lock for synchronization phase D', state='o', prefix='Domain {}'.format(self.domuuid))
zkhandler.writedata(self.zk_conn, { '/locks/domain_migrate/{}'.format(self.domuuid): '' }) zkhandler.writedata(self.zk_conn, {'/locks/domain_migrate/{}'.format(self.domuuid): ''})
self.inreceive = False self.inreceive = False
return return
@ -639,11 +640,11 @@ class VMInstance(object):
# Check the current state of the VM # Check the current state of the VM
try: try:
if self.dom != None: if self.dom is not None:
running, reason = self.dom.state() running, reason = self.dom.state()
else: else:
raise raise
except: except Exception:
running = libvirt.VIR_DOMAIN_NOSTATE running = libvirt.VIR_DOMAIN_NOSTATE
self.logger.out('VM state change for "{}": {} {}'.format(self.domuuid, self.state, self.node), state='i') self.logger.out('VM state change for "{}": {} {}'.format(self.domuuid, self.state, self.node), state='i')
@ -663,12 +664,12 @@ class VMInstance(object):
# provision # provision
# Conditional pass one - Are we already performing an action # Conditional pass one - Are we already performing an action
if self.instart == False \ if self.instart is False \
and self.inrestart == False \ and self.inrestart is False \
and self.inmigrate == False \ and self.inmigrate is False \
and self.inreceive == False \ and self.inreceive is False \
and self.inshutdown == False \ and self.inshutdown is False \
and self.instop == False: and self.instop is False:
# Conditional pass two - Is this VM configured to run on this node # Conditional pass two - Is this VM configured to run on this node
if self.node == self.this_node.name: if self.node == self.this_node.name:
# Conditional pass three - Is this VM currently running on this node # Conditional pass three - Is this VM currently running on this node
@ -683,7 +684,7 @@ class VMInstance(object):
elif self.state == "migrate" or self.state == "migrate-live": elif self.state == "migrate" or self.state == "migrate-live":
# Start the log watcher # Start the log watcher
self.console_log_instance.start() self.console_log_instance.start()
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'start' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
# Add domain to running list # Add domain to running list
self.addDomainToList() self.addDomainToList()
# VM should be restarted # VM should be restarted
@ -706,7 +707,7 @@ class VMInstance(object):
self.receive_migrate() self.receive_migrate()
# VM should be restarted (i.e. started since it isn't running) # VM should be restarted (i.e. started since it isn't running)
if self.state == "restart": if self.state == "restart":
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(self.domuuid): 'start' }) zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
# VM should be shut down; ensure it's gone from this node's domain_list # VM should be shut down; ensure it's gone from this node's domain_list
elif self.state == "shutdown": elif self.state == "shutdown":
self.removeDomainFromList() self.removeDomainFromList()
@ -734,7 +735,6 @@ class VMInstance(object):
else: else:
self.terminate_vm() self.terminate_vm()
# This function is a wrapper for libvirt.lookupByUUID which fixes some problems # This function is a wrapper for libvirt.lookupByUUID which fixes some problems
# 1. Takes a text UUID and handles converting it to bytes # 1. Takes a text UUID and handles converting it to bytes
# 2. Try's it and returns a sensible value if not # 2. Try's it and returns a sensible value if not
@ -753,7 +753,7 @@ class VMInstance(object):
try: try:
# Open a libvirt connection # Open a libvirt connection
lv_conn = libvirt.open(libvirt_name) lv_conn = libvirt.open(libvirt_name)
if lv_conn == None: if lv_conn is None:
self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}'.format(self.domuuid)) self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}'.format(self.domuuid))
return None return None
@ -761,13 +761,13 @@ class VMInstance(object):
dom = lv_conn.lookupByUUID(buuid) dom = lv_conn.lookupByUUID(buuid)
# Fail # Fail
except: except Exception:
dom = None dom = None
# After everything # After everything
finally: finally:
# Close the libvirt connection # Close the libvirt connection
if lv_conn != None: if lv_conn is not None:
lv_conn.close() lv_conn.close()
# Return the dom object (or None) # Return the dom object (or None)

View File

@ -25,13 +25,13 @@ import time
from textwrap import dedent from textwrap import dedent
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler import pvcnoded.zkhandler as zkhandler
import pvcnoded.common as common import pvcnoded.common as common
class VXNetworkInstance(object): class VXNetworkInstance(object):
# Initialization function # Initialization function
def __init__ (self, vni, zk_conn, config, logger, this_node, dns_aggregator): def __init__(self, vni, zk_conn, config, logger, this_node, dns_aggregator):
self.vni = vni self.vni = vni
self.zk_conn = zk_conn self.zk_conn = zk_conn
self.config = config self.config = config
@ -96,13 +96,13 @@ class VXNetworkInstance(object):
self.ip6_gateway = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_gateway'.format(self.vni)) self.ip6_gateway = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_gateway'.format(self.vni))
self.ip6_network = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_network'.format(self.vni)) self.ip6_network = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_network'.format(self.vni))
self.ip6_cidrnetmask = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_network'.format(self.vni)).split('/')[-1] self.ip6_cidrnetmask = zkhandler.readdata(self.zk_conn, '/networks/{}/ip6_network'.format(self.vni)).split('/')[-1]
self.dhcp6_flag = ( zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp6_flag'.format(self.vni)) == 'True' ) self.dhcp6_flag = (zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp6_flag'.format(self.vni)) == 'True')
self.ip4_gateway = zkhandler.readdata(self.zk_conn, '/networks/{}/ip4_gateway'.format(self.vni)) self.ip4_gateway = zkhandler.readdata(self.zk_conn, '/networks/{}/ip4_gateway'.format(self.vni))
self.ip4_network = zkhandler.readdata(self.zk_conn, '/networks/{}/ip4_network'.format(self.vni)) self.ip4_network = zkhandler.readdata(self.zk_conn, '/networks/{}/ip4_network'.format(self.vni))
self.ip4_cidrnetmask = zkhandler.readdata(self.zk_conn, '/networks/{}/ip4_network'.format(self.vni)).split('/')[-1] self.ip4_cidrnetmask = zkhandler.readdata(self.zk_conn, '/networks/{}/ip4_network'.format(self.vni)).split('/')[-1]
self.dhcp4_flag = ( zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp4_flag'.format(self.vni)) == 'True' ) self.dhcp4_flag = (zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp4_flag'.format(self.vni)) == 'True')
self.dhcp4_start = ( zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp4_start'.format(self.vni)) == 'True' ) self.dhcp4_start = (zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp4_start'.format(self.vni)) == 'True')
self.dhcp4_end = ( zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp4_end'.format(self.vni)) == 'True' ) self.dhcp4_end = (zkhandler.readdata(self.zk_conn, '/networks/{}/dhcp4_end'.format(self.vni)) == 'True')
self.vxlan_nic = 'vxlan{}'.format(self.vni) self.vxlan_nic = 'vxlan{}'.format(self.vni)
self.bridge_nic = 'vmbr{}'.format(self.vni) self.bridge_nic = 'vmbr{}'.format(self.vni)
@ -227,7 +227,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
self.startDHCPServer() self.startDHCPServer()
@self.zk_conn.DataWatch('/networks/{}/ip6_gateway'.format(self.vni)) @self.zk_conn.DataWatch('/networks/{}/ip6_gateway'.format(self.vni))
def watch_network_gateway(data, stat, event=''): def watch_network_gateway6(data, stat, event=''):
if event and event.type == 'DELETED': if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher # The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py # because this class instance is about to be reaped in Daemon.py
@ -249,14 +249,14 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
self.startDHCPServer() self.startDHCPServer()
@self.zk_conn.DataWatch('/networks/{}/dhcp6_flag'.format(self.vni)) @self.zk_conn.DataWatch('/networks/{}/dhcp6_flag'.format(self.vni))
def watch_network_dhcp_status(data, stat, event=''): def watch_network_dhcp6_status(data, stat, event=''):
if event and event.type == 'DELETED': if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher # The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py # because this class instance is about to be reaped in Daemon.py
return False return False
if data and self.dhcp6_flag != ( data.decode('ascii') == 'True' ): if data and self.dhcp6_flag != (data.decode('ascii') == 'True'):
self.dhcp6_flag = ( data.decode('ascii') == 'True' ) self.dhcp6_flag = (data.decode('ascii') == 'True')
if self.dhcp6_flag and not self.dhcp_server_daemon and self.this_node.router_state in ['primary', 'takeover']: if self.dhcp6_flag and not self.dhcp_server_daemon and self.this_node.router_state in ['primary', 'takeover']:
self.startDHCPServer() self.startDHCPServer()
elif self.dhcp_server_daemon and not self.dhcp4_flag and self.this_node.router_state in ['primary', 'takeover']: elif self.dhcp_server_daemon and not self.dhcp4_flag and self.this_node.router_state in ['primary', 'takeover']:
@ -278,7 +278,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
self.startDHCPServer() self.startDHCPServer()
@self.zk_conn.DataWatch('/networks/{}/ip4_gateway'.format(self.vni)) @self.zk_conn.DataWatch('/networks/{}/ip4_gateway'.format(self.vni))
def watch_network_gateway(data, stat, event=''): def watch_network_gateway4(data, stat, event=''):
if event and event.type == 'DELETED': if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher # The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py # because this class instance is about to be reaped in Daemon.py
@ -300,14 +300,14 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
self.startDHCPServer() self.startDHCPServer()
@self.zk_conn.DataWatch('/networks/{}/dhcp4_flag'.format(self.vni)) @self.zk_conn.DataWatch('/networks/{}/dhcp4_flag'.format(self.vni))
def watch_network_dhcp_status(data, stat, event=''): def watch_network_dhcp4_status(data, stat, event=''):
if event and event.type == 'DELETED': if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher # The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py # because this class instance is about to be reaped in Daemon.py
return False return False
if data and self.dhcp4_flag != ( data.decode('ascii') == 'True' ): if data and self.dhcp4_flag != (data.decode('ascii') == 'True'):
self.dhcp4_flag = ( data.decode('ascii') == 'True' ) self.dhcp4_flag = (data.decode('ascii') == 'True')
if self.dhcp4_flag and not self.dhcp_server_daemon and self.this_node.router_state in ['primary', 'takeover']: if self.dhcp4_flag and not self.dhcp_server_daemon and self.this_node.router_state in ['primary', 'takeover']:
self.startDHCPServer() self.startDHCPServer()
elif self.dhcp_server_daemon and not self.dhcp6_flag and self.this_node.router_state in ['primary', 'takeover']: elif self.dhcp_server_daemon and not self.dhcp6_flag and self.this_node.router_state in ['primary', 'takeover']:
@ -356,7 +356,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
self.startDHCPServer() self.startDHCPServer()
@self.zk_conn.ChildrenWatch('/networks/{}/firewall_rules/in'.format(self.vni)) @self.zk_conn.ChildrenWatch('/networks/{}/firewall_rules/in'.format(self.vni))
def watch_network_firewall_rules(new_rules, event=''): def watch_network_firewall_rules_in(new_rules, event=''):
if event and event.type == 'DELETED': if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher # The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py # because this class instance is about to be reaped in Daemon.py
@ -368,7 +368,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
self.updateFirewallRules() self.updateFirewallRules()
@self.zk_conn.ChildrenWatch('/networks/{}/firewall_rules/out'.format(self.vni)) @self.zk_conn.ChildrenWatch('/networks/{}/firewall_rules/out'.format(self.vni))
def watch_network_firewall_rules(new_rules, event=''): def watch_network_firewall_rules_out(new_rules, event=''):
if event and event.type == 'DELETED': if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher # The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py # because this class instance is about to be reaped in Daemon.py
@ -409,7 +409,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
try: try:
os.remove(filename) os.remove(filename)
self.dhcp_server_daemon.signal('hup') self.dhcp_server_daemon.signal('hup')
except: except Exception:
pass pass
def updateFirewallRules(self): def updateFirewallRules(self):
@ -438,7 +438,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
for order in sorted(ordered_acls_out.keys()): for order in sorted(ordered_acls_out.keys()):
sorted_acl_list['out'].append(ordered_acls_out[order]) sorted_acl_list['out'].append(ordered_acls_out[order])
for direction in 'in', 'out': for direction in 'in', 'out':
for acl in sorted_acl_list[direction]: for acl in sorted_acl_list[direction]:
rule_prefix = "add rule inet filter vxlan{}-{} counter".format(self.vni, direction) rule_prefix = "add rule inet filter vxlan{}-{} counter".format(self.vni, direction)
rule_data = zkhandler.readdata(self.zk_conn, '/networks/{}/firewall_rules/{}/{}/rule'.format(self.vni, direction, acl)) rule_data = zkhandler.readdata(self.zk_conn, '/networks/{}/firewall_rules/{}/{}/rule'.format(self.vni, direction, acl))
@ -452,9 +452,8 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
firewall_rules += self.firewall_rules_v4 firewall_rules += self.firewall_rules_v4
output = "{}\n# User rules\n{}\n".format( output = "{}\n# User rules\n{}\n".format(
firewall_rules, firewall_rules,
'\n'.join(full_ordered_rules) '\n'.join(full_ordered_rules))
)
with open(self.nftables_netconf_filename, 'w') as nfnetfile: with open(self.nftables_netconf_filename, 'w') as nfnetfile:
nfnetfile.write(dedent(output)) nfnetfile.write(dedent(output))
@ -702,7 +701,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
else: else:
dhcp_configuration += dhcp_configuration_v4 dhcp_configuration += dhcp_configuration_v4
if self.dhcp4_flag: if self.dhcp4_flag:
dhcp_configuration += dhcp_configuration_v4_dhcp dhcp_configuration += dhcp_configuration_v4_dhcp
# Start the dnsmasq process in a thread # Start the dnsmasq process in a thread
print('/usr/sbin/dnsmasq {}'.format(' '.join(dhcp_configuration))) print('/usr/sbin/dnsmasq {}'.format(' '.join(dhcp_configuration)))
@ -802,7 +801,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
try: try:
os.remove(self.nftables_netconf_filename) os.remove(self.nftables_netconf_filename)
except: except Exception:
pass pass
# Reload firewall rules # Reload firewall rules

View File

@ -22,14 +22,13 @@
import subprocess import subprocess
import signal import signal
import time
from threading import Thread from threading import Thread
from shlex import split as shlex_split from shlex import split as shlex_split
import pvcnoded.log as log
import pvcnoded.zkhandler as zkhandler import pvcnoded.zkhandler as zkhandler
class OSDaemon(object): class OSDaemon(object):
def __init__(self, command_string, environment, logfile): def __init__(self, command_string, environment, logfile):
command = shlex_split(command_string) command = shlex_split(command_string)
@ -57,10 +56,12 @@ class OSDaemon(object):
} }
self.proc.send_signal(signal_map[sent_signal]) self.proc.send_signal(signal_map[sent_signal])
def run_os_daemon(command_string, environment=None, logfile=None): def run_os_daemon(command_string, environment=None, logfile=None):
daemon = OSDaemon(command_string, environment, logfile) daemon = OSDaemon(command_string, environment, logfile)
return daemon return daemon
# Run a oneshot command, optionally without blocking # Run a oneshot command, optionally without blocking
def run_os_command(command_string, background=False, environment=None, timeout=None): def run_os_command(command_string, background=False, environment=None, timeout=None):
command = shlex_split(command_string) command = shlex_split(command_string)
@ -94,14 +95,15 @@ def run_os_command(command_string, background=False, environment=None, timeout=N
try: try:
stdout = command_output.stdout.decode('ascii') stdout = command_output.stdout.decode('ascii')
except: except Exception:
stdout = '' stdout = ''
try: try:
stderr = command_output.stderr.decode('ascii') stderr = command_output.stderr.decode('ascii')
except: except Exception:
stderr = '' stderr = ''
return retcode, stdout, stderr return retcode, stdout, stderr
# Reload the firewall rules of the system # Reload the firewall rules of the system
def reload_firewall_rules(logger, rules_file): def reload_firewall_rules(logger, rules_file):
logger.out('Reloading firewall configuration', state='o') logger.out('Reloading firewall configuration', state='o')
@ -109,6 +111,7 @@ def reload_firewall_rules(logger, rules_file):
if retcode != 0: if retcode != 0:
logger.out('Failed to reload configuration: {}'.format(stderr), state='e') logger.out('Failed to reload configuration: {}'.format(stderr), state='e')
# Create IP address # Create IP address
def createIPAddress(ipaddr, cidrnetmask, dev): def createIPAddress(ipaddr, cidrnetmask, dev):
run_os_command( run_os_command(
@ -125,6 +128,7 @@ def createIPAddress(ipaddr, cidrnetmask, dev):
) )
) )
# Remove IP address # Remove IP address
def removeIPAddress(ipaddr, cidrnetmask, dev): def removeIPAddress(ipaddr, cidrnetmask, dev):
run_os_command( run_os_command(
@ -135,6 +139,7 @@ def removeIPAddress(ipaddr, cidrnetmask, dev):
) )
) )
# #
# Find a migration target # Find a migration target
# #
@ -144,20 +149,20 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',') node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
if not any(node_limit): if not any(node_limit):
node_limit = '' node_limit = ''
except: except Exception:
node_limit = '' node_limit = ''
zkhandler.writedata(zk_conn, { '/domains/{}/node_limit'.format(dom_uuid): '' }) zkhandler.writedata(zk_conn, {'/domains/{}/node_limit'.format(dom_uuid): ''})
# Determine VM search field # Determine VM search field
try: try:
search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid)) search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid))
except Exception as e: except Exception:
search_field = None search_field = None
# If our search field is invalid, use and set the default (for next time) # If our search field is invalid, use and set the default (for next time)
if search_field is None or search_field == 'None': if search_field is None or search_field == 'None':
search_field = config['migration_target_selector'] search_field = config['migration_target_selector']
zkhandler.writedata(zk_conn, { '/domains/{}/node_selector'.format(dom_uuid): config['migration_target_selector'] }) zkhandler.writedata(zk_conn, {'/domains/{}/node_selector'.format(dom_uuid): config['migration_target_selector']})
if config['debug']: if config['debug']:
logger.out('Migrating VM {} with selector {}'.format(dom_uuid, search_field), state='d', prefix='node-flush') logger.out('Migrating VM {} with selector {}'.format(dom_uuid, search_field), state='d', prefix='node-flush')
@ -175,6 +180,7 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
# Nothing was found # Nothing was found
return None return None
# Get the list of valid target nodes # Get the list of valid target nodes
def getNodes(zk_conn, node_limit, dom_uuid): def getNodes(zk_conn, node_limit, dom_uuid):
valid_node_list = [] valid_node_list = []
@ -198,6 +204,7 @@ def getNodes(zk_conn, node_limit, dom_uuid):
return valid_node_list return valid_node_list
# via free memory (relative to allocated memory) # via free memory (relative to allocated memory)
def findTargetNodeMem(zk_conn, config, logger, node_limit, dom_uuid): def findTargetNodeMem(zk_conn, config, logger, node_limit, dom_uuid):
most_provfree = 0 most_provfree = 0
@ -224,6 +231,7 @@ def findTargetNodeMem(zk_conn, config, logger, node_limit, dom_uuid):
logger.out('Selected node {}'.format(target_node), state='d', prefix='node-flush') logger.out('Selected node {}'.format(target_node), state='d', prefix='node-flush')
return target_node return target_node
# via load average # via load average
def findTargetNodeLoad(zk_conn, config, logger, node_limit, dom_uuid): def findTargetNodeLoad(zk_conn, config, logger, node_limit, dom_uuid):
least_load = 9999.0 least_load = 9999.0
@ -246,6 +254,7 @@ def findTargetNodeLoad(zk_conn, config, logger, node_limit, dom_uuid):
logger.out('Selected node {}'.format(target_node), state='d', prefix='node-flush') logger.out('Selected node {}'.format(target_node), state='d', prefix='node-flush')
return target_node return target_node
# via total vCPUs # via total vCPUs
def findTargetNodeVCPUs(zk_conn, config, logger, node_limit, dom_uuid): def findTargetNodeVCPUs(zk_conn, config, logger, node_limit, dom_uuid):
least_vcpus = 9999 least_vcpus = 9999
@ -268,6 +277,7 @@ def findTargetNodeVCPUs(zk_conn, config, logger, node_limit, dom_uuid):
logger.out('Selected node {}'.format(target_node), state='d', prefix='node-flush') logger.out('Selected node {}'.format(target_node), state='d', prefix='node-flush')
return target_node return target_node
# via total VMs # via total VMs
def findTargetNodeVMs(zk_conn, config, logger, node_limit, dom_uuid): def findTargetNodeVMs(zk_conn, config, logger, node_limit, dom_uuid):
least_vms = 9999 least_vms = 9999

View File

@ -20,11 +20,13 @@
# #
############################################################################### ###############################################################################
import argparse import argparse
import os, sys import os
import sys
import kazoo.client import kazoo.client
import re import re
import yaml import yaml
# #
# Variables # Variables
# #
@ -39,30 +41,33 @@ def get_zookeeper_key():
print('ERROR: DNSMASQ_BRIDGE_INTERFACE environment variable not found: {}'.format(e), file=sys.stderr) print('ERROR: DNSMASQ_BRIDGE_INTERFACE environment variable not found: {}'.format(e), file=sys.stderr)
exit(1) exit(1)
# Get the ID of the interface (the digits) # Get the ID of the interface (the digits)
network_vni = re.findall('\d+', interface)[0] network_vni = re.findall(r'\d+', interface)[0]
# Create the key # Create the key
zookeeper_key = '/networks/{}/dhcp4_leases'.format(network_vni) zookeeper_key = '/networks/{}/dhcp4_leases'.format(network_vni)
return zookeeper_key return zookeeper_key
def get_lease_expiry(): def get_lease_expiry():
try: try:
expiry = os.environ['DNSMASQ_LEASE_EXPIRES'] expiry = os.environ['DNSMASQ_LEASE_EXPIRES']
except: except Exception:
expiry = '0' expiry = '0'
return expiry return expiry
def get_client_id(): def get_client_id():
try: try:
client_id = os.environ['DNSMASQ_CLIENT_ID'] client_id = os.environ['DNSMASQ_CLIENT_ID']
except: except Exception:
client_id = '*' client_id = '*'
return client_id return client_id
def connect_zookeeper(): def connect_zookeeper():
# We expect the environ to contain the config file # We expect the environ to contain the config file
try: try:
pvcnoded_config_file = os.environ['PVCD_CONFIG_FILE'] pvcnoded_config_file = os.environ['PVCD_CONFIG_FILE']
except: except Exception:
# Default place # Default place
pvcnoded_config_file = '/etc/pvc/pvcnoded.yaml' pvcnoded_config_file = '/etc/pvc/pvcnoded.yaml'
@ -82,9 +87,11 @@ def connect_zookeeper():
return zk_conn return zk_conn
def read_data(zk_conn, key): def read_data(zk_conn, key):
return zk_conn.get(key)[0].decode('ascii') return zk_conn.get(key)[0].decode('ascii')
def get_lease(zk_conn, zk_leases_key, macaddr): def get_lease(zk_conn, zk_leases_key, macaddr):
expiry = read_data(zk_conn, '{}/{}/expiry'.format(zk_leases_key, macaddr)) expiry = read_data(zk_conn, '{}/{}/expiry'.format(zk_leases_key, macaddr))
ipaddr = read_data(zk_conn, '{}/{}/ipaddr'.format(zk_leases_key, macaddr)) ipaddr = read_data(zk_conn, '{}/{}/ipaddr'.format(zk_leases_key, macaddr))
@ -92,6 +99,7 @@ def get_lease(zk_conn, zk_leases_key, macaddr):
clientid = read_data(zk_conn, '{}/{}/clientid'.format(zk_leases_key, macaddr)) clientid = read_data(zk_conn, '{}/{}/clientid'.format(zk_leases_key, macaddr))
return expiry, ipaddr, hostname, clientid return expiry, ipaddr, hostname, clientid
# #
# Command Functions # Command Functions
# #
@ -107,6 +115,7 @@ def read_lease_database(zk_conn, zk_leases_key):
# Output list # Output list
print('\n'.join(output_list)) print('\n'.join(output_list))
def add_lease(zk_conn, zk_leases_key, expiry, macaddr, ipaddr, hostname, clientid): def add_lease(zk_conn, zk_leases_key, expiry, macaddr, ipaddr, hostname, clientid):
if not hostname: if not hostname:
hostname = '' hostname = ''
@ -118,9 +127,11 @@ def add_lease(zk_conn, zk_leases_key, expiry, macaddr, ipaddr, hostname, clienti
transaction.create('{}/{}/clientid'.format(zk_leases_key, macaddr), clientid.encode('ascii')) transaction.create('{}/{}/clientid'.format(zk_leases_key, macaddr), clientid.encode('ascii'))
transaction.commit() transaction.commit()
def del_lease(zk_conn, zk_leases_key, macaddr, expiry): def del_lease(zk_conn, zk_leases_key, macaddr, expiry):
zk_conn.delete('{}/{}'.format(zk_leases_key, macaddr), recursive=True) zk_conn.delete('{}/{}'.format(zk_leases_key, macaddr), recursive=True)
# #
# Instantiate the parser # Instantiate the parser
# #

View File

@ -26,6 +26,7 @@ import pvcnoded.zkhandler as zkhandler
import pvcnoded.common as common import pvcnoded.common as common
import pvcnoded.VMInstance as VMInstance import pvcnoded.VMInstance as VMInstance
# #
# Fence thread entry function # Fence thread entry function
# #
@ -62,9 +63,9 @@ def fenceNode(node_name, zk_conn, config, logger):
# Force into secondary network state if needed # Force into secondary network state if needed
if node_name in config['coordinators']: if node_name in config['coordinators']:
logger.out('Forcing secondary status for node "{}"'.format(node_name), state='i') logger.out('Forcing secondary status for node "{}"'.format(node_name), state='i')
zkhandler.writedata(zk_conn, { '/nodes/{}/routerstate'.format(node_name): 'secondary' }) zkhandler.writedata(zk_conn, {'/nodes/{}/routerstate'.format(node_name): 'secondary'})
if zkhandler.readdata(zk_conn, '/primary_node') == node_name: if zkhandler.readdata(zk_conn, '/primary_node') == node_name:
zkhandler.writedata(zk_conn, { '/primary_node': 'none' }) zkhandler.writedata(zk_conn, {'/primary_node': 'none'})
# If the fence succeeded and successful_fence is migrate # If the fence succeeded and successful_fence is migrate
if fence_status and config['successful_fence'] == 'migrate': if fence_status and config['successful_fence'] == 'migrate':
@ -74,6 +75,7 @@ def fenceNode(node_name, zk_conn, config, logger):
if not fence_status and config['failed_fence'] == 'migrate' and config['suicide_intervals'] != '0': if not fence_status and config['failed_fence'] == 'migrate' and config['suicide_intervals'] != '0':
migrateFromFencedNode(zk_conn, node_name, config, logger) migrateFromFencedNode(zk_conn, node_name, config, logger)
# Migrate hosts away from a fenced node # Migrate hosts away from a fenced node
def migrateFromFencedNode(zk_conn, node_name, config, logger): def migrateFromFencedNode(zk_conn, node_name, config, logger):
logger.out('Migrating VMs from dead node "{}" to new hosts'.format(node_name), state='i') logger.out('Migrating VMs from dead node "{}" to new hosts'.format(node_name), state='i')
@ -82,7 +84,7 @@ def migrateFromFencedNode(zk_conn, node_name, config, logger):
dead_node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split() dead_node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
# Set the node to a custom domainstate so we know what's happening # Set the node to a custom domainstate so we know what's happening
zkhandler.writedata(zk_conn, { '/nodes/{}/domainstate'.format(node_name): 'fence-flush' }) zkhandler.writedata(zk_conn, {'/nodes/{}/domainstate'.format(node_name): 'fence-flush'})
# Migrate a VM after a flush # Migrate a VM after a flush
def fence_migrate_vm(dom_uuid): def fence_migrate_vm(dom_uuid):
@ -109,7 +111,8 @@ def migrateFromFencedNode(zk_conn, node_name, config, logger):
fence_migrate_vm(dom_uuid) fence_migrate_vm(dom_uuid)
# Set node in flushed state for easy remigrating when it comes back # Set node in flushed state for easy remigrating when it comes back
zkhandler.writedata(zk_conn, { '/nodes/{}/domainstate'.format(node_name): 'flushed' }) zkhandler.writedata(zk_conn, {'/nodes/{}/domainstate'.format(node_name): 'flushed'})
# #
# Perform an IPMI fence # Perform an IPMI fence
@ -145,6 +148,7 @@ def rebootViaIPMI(ipmi_hostname, ipmi_user, ipmi_password, logger):
print(ipmi_reset_stderr) print(ipmi_reset_stderr)
return False return False
# #
# Verify that IPMI connectivity to this host exists (used during node init) # Verify that IPMI connectivity to this host exists (used during node init)
# #

View File

@ -22,6 +22,7 @@
import datetime import datetime
class Logger(object): class Logger(object):
# Define a logger class for a daemon instance # Define a logger class for a daemon instance
# Keeps record of where to log, and is passed messages which are # Keeps record of where to log, and is passed messages which are
@ -35,7 +36,7 @@ class Logger(object):
fmt_purple = '\033[95m' fmt_purple = '\033[95m'
fmt_cyan = '\033[96m' fmt_cyan = '\033[96m'
fmt_white = '\033[97m' fmt_white = '\033[97m'
fmt_bold = '\033[1m' fmt_bold = '\033[1m'
fmt_end = '\033[0m' fmt_end = '\033[0m'
last_colour = '' last_colour = ''
@ -43,26 +44,26 @@ class Logger(object):
# Format maps # Format maps
format_map_colourized = { format_map_colourized = {
# Colourized formatting with chevron prompts (log_colours = True) # Colourized formatting with chevron prompts (log_colours = True)
'o': { 'colour': fmt_green, 'prompt': '>>> ' }, 'o': {'colour': fmt_green, 'prompt': '>>> '},
'e': { 'colour': fmt_red, 'prompt': '>>> ' }, 'e': {'colour': fmt_red, 'prompt': '>>> '},
'w': { 'colour': fmt_yellow, 'prompt': '>>> ' }, 'w': {'colour': fmt_yellow, 'prompt': '>>> '},
't': { 'colour': fmt_purple, 'prompt': '>>> ' }, 't': {'colour': fmt_purple, 'prompt': '>>> '},
'i': { 'colour': fmt_blue, 'prompt': '>>> ' }, 'i': {'colour': fmt_blue, 'prompt': '>>> '},
's': { 'colour': fmt_cyan, 'prompt': '>>> ' }, 's': {'colour': fmt_cyan, 'prompt': '>>> '},
'd': { 'colour': fmt_white, 'prompt': '>>> ' }, 'd': {'colour': fmt_white, 'prompt': '>>> '},
'x': { 'colour': last_colour, 'prompt': last_prompt } 'x': {'colour': last_colour, 'prompt': last_prompt}
} }
format_map_textual = { format_map_textual = {
# Uncolourized formatting with text prompts (log_colours = False) # Uncolourized formatting with text prompts (log_colours = False)
'o': { 'colour': '', 'prompt': 'ok: ' }, 'o': {'colour': '', 'prompt': 'ok: '},
'e': { 'colour': '', 'prompt': 'failed: ' }, 'e': {'colour': '', 'prompt': 'failed: '},
'w': { 'colour': '', 'prompt': 'warning: ' }, 'w': {'colour': '', 'prompt': 'warning: '},
't': { 'colour': '', 'prompt': 'tick: ' }, 't': {'colour': '', 'prompt': 'tick: '},
'i': { 'colour': '', 'prompt': 'info: ' }, 'i': {'colour': '', 'prompt': 'info: '},
's': { 'colour': '', 'prompt': 'system: ' }, 's': {'colour': '', 'prompt': 'system: '},
'd': { 'colour': '', 'prompt': 'debug: ' }, 'd': {'colour': '', 'prompt': 'debug: '},
'x': { 'colour': '', 'prompt': last_prompt } 'x': {'colour': '', 'prompt': last_prompt}
} }
# Initialization of instance # Initialization of instance

View File

@ -22,6 +22,7 @@
import uuid import uuid
# Child list function # Child list function
def listchildren(zk_conn, key): def listchildren(zk_conn, key):
try: try:
@ -30,6 +31,7 @@ def listchildren(zk_conn, key):
except Exception: except Exception:
return None return None
# Key deletion function # Key deletion function
def deletekey(zk_conn, key, recursive=True): def deletekey(zk_conn, key, recursive=True):
try: try:
@ -38,16 +40,17 @@ def deletekey(zk_conn, key, recursive=True):
except Exception: except Exception:
return False return False
# Data read function # Data read function
def readdata(zk_conn, key): def readdata(zk_conn, key):
try: try:
data_raw = zk_conn.get(key) data_raw = zk_conn.get(key)
data = data_raw[0].decode('utf8') data = data_raw[0].decode('utf8')
meta = data_raw[1]
return data return data
except Exception: except Exception:
return None return None
# Data write function # Data write function
def writedata(zk_conn, kv): def writedata(zk_conn, kv):
# Commit the transaction # Commit the transaction
@ -88,6 +91,7 @@ def writedata(zk_conn, kv):
except Exception: except Exception:
return False return False
# Key rename function # Key rename function
def renamekey(zk_conn, kv): def renamekey(zk_conn, kv):
# This one is not transactional because, inexplicably, transactions don't # This one is not transactional because, inexplicably, transactions don't
@ -102,8 +106,9 @@ def renamekey(zk_conn, kv):
old_data = zk_conn.get(old_name)[0] old_data = zk_conn.get(old_name)[0]
# Find the children of old_name recursively
child_keys = list() child_keys = list()
# Find the children of old_name recursively
def get_children(key): def get_children(key):
children = zk_conn.get_children(key) children = zk_conn.get_children(key)
if not children: if not children:
@ -133,6 +138,7 @@ def renamekey(zk_conn, kv):
except Exception: except Exception:
return False return False
# Write lock function # Write lock function
def writelock(zk_conn, key): def writelock(zk_conn, key):
count = 1 count = 1
@ -149,6 +155,7 @@ def writelock(zk_conn, key):
continue continue
return lock return lock
# Read lock function # Read lock function
def readlock(zk_conn, key): def readlock(zk_conn, key):
count = 1 count = 1
@ -165,6 +172,7 @@ def readlock(zk_conn, key):
continue continue
return lock return lock
# Exclusive lock function # Exclusive lock function
def exclusivelock(zk_conn, key): def exclusivelock(zk_conn, key):
count = 1 count = 1