Compare commits

..

15 Commits

Author SHA1 Message Date
0ccfc41398 Bump version to 0.9.13 2021-02-17 11:37:59 -05:00
ab05e0f3db Add B suffix back to bare bytes values 2021-02-17 11:37:36 -05:00
9291ce6ffc Correct output of fromhuman and add size compare
Ensures that the bytes_tohuman returns an integer to avoid the hacky
workaround of stripping off the B.

Adds a verification on the size of a new volume, that it is not larger
than the free space of the pool to prevent errors/excessively-large
volumes from being created.

Closes #120
2021-02-17 11:27:26 -05:00
dd87951642 Fix bad calls in pool element 2021-02-17 11:14:50 -05:00
0e4bece441 Add missing inc/dec of snapshot_count 2021-02-14 17:02:49 -05:00
b33c0ab0e2 Final final fix to snapshot ordering 2021-02-14 16:48:42 -05:00
094d25dafa Increase indent further 2021-02-14 16:43:51 -05:00
150c61d226 Actually fix sorting issue 2021-02-14 16:41:59 -05:00
f1c0c9325d Fix indentation issue with API daemon 2021-02-14 16:41:52 -05:00
26b0a8b5c1 Fix sorting bug with snapshot list 2021-02-14 16:34:43 -05:00
f22f291c8b Add additional field and info to Swagger 2021-02-09 01:49:15 -05:00
9100c63e99 Add stored_bytes to pool stats information 2021-02-09 01:46:01 -05:00
aba567d6c9 Add nice startup banners to both daemons
Add nicer easy-to-find (yay ASCII art) banners for the startup printouts
of both the node and API daemons. Also adds the safe loader to pvcnoded
to prevent hassle messages and a version string in the API daemon file.
2021-02-08 02:51:43 -05:00
0db8fd9da6 Bump version to 0.9.12 2021-01-28 16:29:58 -05:00
a44f134230 Remove systemd deps on zookeeper and libvirt
This caused a serious race condition, since the IPs managed by PVC had
not yet come up, but Zookeeper was trying to start and bind to them,
which of course failed.

Remove these dependencies entirely - the daemon itself starts these
services during initialization and they do not need to be started by
systemd first.
2021-01-28 16:25:02 -05:00
11 changed files with 150 additions and 32 deletions

View File

@ -20,6 +20,20 @@ To get started with PVC, please see the [About](https://parallelvirtualcluster.r
## Changelog
#### v0.9.13
* Adds nicer startup messages for daemons
* Adds additional API field for stored_bytes to pool stats
* Fixes sorting issues with snapshot lists
* Fixes missing increment/decrement of snapshot_count on volumes
* Fixes bad calls in pool element API endpoints
* Fixes inconsistent bytes_tohuman behaviour in daemons
* Adds validation and maximum volume size on creation (must be smaller than the pool free space)
#### v0.9.12
* Fixes a bug in the pvcnoded service unit file causing a Zookeeper startup race condition
#### v0.9.11
* Documentation updates

View File

@ -26,10 +26,31 @@ import pvcapid.flaskapi as pvc_api
# Entrypoint
##########################################################
# Version string for startup output
version = '0.9.11'
if pvc_api.config['ssl_enabled']:
context = (pvc_api.config['ssl_cert_file'], pvc_api.config['ssl_key_file'])
else:
context = None
print('Starting PVC API daemon at {}:{} with SSL={}, Authentication={}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'], pvc_api.config['ssl_enabled'], pvc_api.config['auth_enabled']))
# Print our startup messages
print('')
print('|--------------------------------------------------|')
print('| ######## ## ## ###### |')
print('| ## ## ## ## ## ## |')
print('| ## ## ## ## ## |')
print('| ######## ## ## ## |')
print('| ## ## ## ## |')
print('| ## ## ## ## ## |')
print('| ## ### ###### |')
print('|--------------------------------------------------|')
print('| Parallel Virtual Cluster API daemon v{0: <11} |'.format(version))
print('| API version: v{0: <34} |'.format(pvc_api.API_VERSION))
print('| Listen: {0: <40} |'.format('{}:{}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'])))
print('| SSL: {0: <43} |'.format(str(pvc_api.config['ssl_enabled'])))
print('| Authentication: {0: <32} |'.format(str(pvc_api.config['auth_enabled'])))
print('|--------------------------------------------------|')
print('')
pvc_api.app.run(pvc_api.config['listen_address'], pvc_api.config['listen_port'], threaded=True, ssl_context=context)

View File

@ -52,16 +52,16 @@ def strtobool(stringv):
# Parse the configuration file
try:
pvc_config_file = os.environ['PVC_CONFIG_FILE']
pvcapid_config_file = os.environ['PVC_CONFIG_FILE']
except Exception:
print('Error: The "PVC_CONFIG_FILE" environment variable must be set before starting pvcapid.')
exit(1)
print('Starting PVC API daemon')
print('Loading configuration from file "{}"'.format(pvcapid_config_file))
# Read in the config
try:
with open(pvc_config_file, 'r') as cfgfile:
with open(pvcapid_config_file, 'r') as cfgfile:
o_config = yaml.load(cfgfile, Loader=yaml.BaseLoader)
except Exception as e:
print('ERROR: Failed to parse configuration file: {}'.format(e))
@ -3337,12 +3337,15 @@ class API_Storage_Ceph_Pool_Root(Resource):
id:
type: integer
description: The Ceph pool ID
stored_bytes:
type: integer
description: The stored data size (in bytes, post-replicas)
free_bytes:
type: integer
description: The total free space (in bytes)
description: The total free space (in bytes, post-replicas)
used_bytes:
type: integer
description: The total used space (in bytes)
description: The total used space (in bytes, pre-replicas)
used_percent:
type: number
description: The ratio of used space to free space
@ -3464,7 +3467,7 @@ class API_Storage_Ceph_Pool_Element(Resource):
type: object
id: Message
"""
return api_helper, api_helper.ceph_pool_list(
return api_helper.ceph_pool_list(
pool,
is_fuzzy=False
)
@ -3508,7 +3511,7 @@ class API_Storage_Ceph_Pool_Element(Resource):
type: object
id: Message
"""
api_helper.ceph_pool_add(
return api_helper.ceph_pool_add(
pool,
reqargs.get('pgs', None),
reqargs.get('replcfg', None)

View File

@ -177,8 +177,8 @@ def upload_ova(pool, name, ova_size):
pvc_common.stopZKConnection(zk_conn)
# Normalize the OVA size to bytes
ova_size_bytes = int(pvc_ceph.format_bytes_fromhuman(ova_size)[:-1])
ova_size = pvc_ceph.format_bytes_fromhuman(ova_size)
ova_size_bytes = pvc_ceph.format_bytes_fromhuman(ova_size)
ova_size = '{}B'.format(ova_size_bytes)
# Verify that the cluster has enough space to store the OVA volumes (2x OVA size, temporarily, 1x permanently)
zk_conn = pvc_common.startZKConnection(config['coordinators'])
@ -274,7 +274,7 @@ def upload_ova(pool, name, ova_size):
vm_volume_size = disk.get('capacity')
# Normalize the dev size to bytes
dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw)
dev_size = '{}B'.format(pvc_ceph.format_bytes_fromhuman(dev_size_raw))
def cleanup_img_maps():
zk_conn = pvc_common.startZKConnection(config['coordinators'])
@ -368,7 +368,7 @@ def upload_ova(pool, name, ova_size):
vm_volume_size = disk.get('capacity')
# The function always returns XXXXB, so strip off the B and convert to an integer
vm_volume_size_bytes = int(pvc_ceph.format_bytes_fromhuman(vm_volume_size)[:-1])
vm_volume_size_bytes = pvc_ceph.format_bytes_fromhuman(vm_volume_size)
vm_volume_size_gb = math.ceil(vm_volume_size_bytes / 1024 / 1024 / 1024)
query = "INSERT INTO ova_volume (ova, pool, volume_name, volume_format, disk_id, disk_size_gb) VALUES (%s, %s, %s, %s, %s, %s);"

View File

@ -17,6 +17,7 @@ $EDITOR ${changelog_file}
changelog="$( cat ${changelog_file} | grep -v '^#' | sed 's/^*/ */' )"
sed -i "s,version = '${current_version}',version = '${new_version}'," node-daemon/pvcnoded/Daemon.py
sed -i "s,version = '${current_version}',version = '${new_version}'," api-daemon/pvcapid/Daemon.py
readme_tmpdir=$( mktemp -d )
cp README.md ${readme_tmpdir}/

View File

@ -122,7 +122,7 @@ def format_bytes_fromhuman(datahuman):
dataunit = 'B'
datasize = int(datahuman)
databytes = datasize * byte_unit_matrix[dataunit]
return '{}B'.format(databytes)
return databytes
# Format ops sizes to/from human-readable units
@ -475,7 +475,17 @@ def getVolumeInformation(zk_conn, pool, volume):
def add_volume(zk_conn, pool, name, size):
# 1. Create the volume
# 1. Verify the size of the volume
pool_information = getPoolInformation(zk_conn, pool)
size_bytes = format_bytes_fromhuman(size)
if size_bytes >= int(pool_information['stats']['free_bytes']):
return False, 'ERROR: Requested volume size is greater than the available free space in the pool'
# Add 'B' if the volume is in bytes
if re.match(r'^[0-9]+$', size):
size = '{}B'.format(size)
# 2. Create the volume
retcode, stdout, stderr = common.run_os_command('rbd create --size {} --image-feature layering,exclusive-lock {}/{}'.format(size, pool, name))
if retcode:
return False, 'ERROR: Failed to create RBD volume "{}": {}'.format(name, stderr)
@ -545,7 +555,7 @@ def resize_volume(zk_conn, pool, name, size):
target_lv_conn = libvirt.open(dest_lv)
target_vm_conn = target_lv_conn.lookupByName(vm_info['name'])
if target_vm_conn:
target_vm_conn.blockResize(volume_id, int(format_bytes_fromhuman(size)[:-1]), libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
target_vm_conn.blockResize(volume_id, format_bytes_fromhuman(size), libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
target_lv_conn.close()
except Exception:
pass
@ -715,6 +725,16 @@ def add_snapshot(zk_conn, pool, volume, name):
'/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
})
# 3. Update the count of snapshots on this volume
volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
volume_stats = dict(json.loads(volume_stats_raw))
# Increment the snapshot count on the volume
volume_stats['snapshot_count'] = volume_stats['snapshot_count'] + 1
volume_stats_raw = json.dumps(volume_stats)
zkhandler.writedata(zk_conn, {
'/ceph/volumes/{}/{}/stats'.format(pool, volume): volume_stats_raw
})
return True, 'Created RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
@ -751,6 +771,16 @@ def remove_snapshot(zk_conn, pool, volume, name):
# 2. Delete snapshot from Zookeeper
zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
# 3. Update the count of snapshots on this volume
volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
volume_stats = dict(json.loads(volume_stats_raw))
# Decrement the snapshot count on the volume
volume_stats['snapshot_count'] = volume_stats['snapshot_count'] - 1
volume_stats_raw = json.dumps(volume_stats)
zkhandler.writedata(zk_conn, {
'/ceph/volumes/{}/{}/stats'.format(pool, volume): volume_stats_raw
})
return True, 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
@ -783,4 +813,4 @@ def get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy=True):
else:
snapshot_list.append({'pool': pool_name, 'volume': volume_name, 'snapshot': snapshot_name})
return True, sorted(snapshot_list, key=lambda x: int(x['id']))
return True, sorted(snapshot_list, key=lambda x: str(x['snapshot']))

18
debian/changelog vendored
View File

@ -1,3 +1,21 @@
pvc (0.9.13-0) unstable; urgency=high
* Adds nicer startup messages for daemons
* Adds additional API field for stored_bytes to pool stats
* Fixes sorting issues with snapshot lists
* Fixes missing increment/decrement of snapshot_count on volumes
* Fixes bad calls in pool element API endpoints
* Fixes inconsistent bytes_tohuman behaviour in daemons
* Adds validation and maximum volume size on creation (must be smaller than the pool free space)
-- Joshua M. Boniface <joshua@boniface.me> Wed, 17 Feb 2021 11:33:28 -0500
pvc (0.9.12-0) unstable; urgency=high
* Fixes a bug in the pvcnoded service unit file causing a Zookeeper startup race condition
-- Joshua M. Boniface <joshua@boniface.me> Thu, 28 Jan 2021 16:29:58 -0500
pvc (0.9.11-0) unstable; urgency=high
* Documentation updates

View File

@ -18,6 +18,20 @@ To get started with PVC, please see the [About](https://parallelvirtualcluster.r
## Changelog
#### v0.9.13
* Adds nicer startup messages for daemons
* Adds additional API field for stored_bytes to pool stats
* Fixes sorting issues with snapshot lists
* Fixes missing increment/decrement of snapshot_count on volumes
* Fixes bad calls in pool element API endpoints
* Fixes inconsistent bytes_tohuman behaviour in daemons
* Adds validation and maximum volume size on creation (must be smaller than the pool free space)
#### v0.9.12
* Fixes a bug in the pvcnoded service unit file causing a Zookeeper startup race condition
#### v0.9.11
* Documentation updates

View File

@ -621,7 +621,7 @@
"stats": {
"properties": {
"free_bytes": {
"description": "The total free space (in bytes)",
"description": "The total free space (in bytes, post-replicas)",
"type": "integer"
},
"id": {
@ -660,8 +660,12 @@
"description": "The total read operations on the pool (pool-lifetime)",
"type": "integer"
},
"stored_bytes": {
"description": "The stored data size (in bytes, post-replicas)",
"type": "integer"
},
"used_bytes": {
"description": "The total used space (in bytes)",
"description": "The total used space (in bytes, pre-replicas)",
"type": "integer"
},
"used_percent": {

View File

@ -2,8 +2,7 @@
[Unit]
Description = Parallel Virtual Cluster node daemon
After = network-online.target zookeeper.service libvirtd.service
Wants = zookeeper.service libvirtd.service
After = network-online.target
PartOf = pvc.target
[Service]

View File

@ -54,7 +54,7 @@ import pvcnoded.CephInstance as CephInstance
import pvcnoded.MetadataAPIInstance as MetadataAPIInstance
# Version string for startup output
version = '0.9.11'
version = '0.9.13'
###############################################################################
# PVCD - node daemon startup program
@ -134,7 +134,7 @@ def readConfig(pvcnoded_config_file, myhostname):
with open(pvcnoded_config_file, 'r') as cfgfile:
try:
o_config = yaml.load(cfgfile)
o_config = yaml.load(cfgfile, Loader=yaml.SafeLoader)
except Exception as e:
print('ERROR: Failed to parse configuration file: {}'.format(e))
exit(1)
@ -331,16 +331,29 @@ if not os.path.exists(config['log_directory']):
logger = log.Logger(config)
# Print our startup messages
logger.out('Parallel Virtual Cluster node daemon v{}'.format(version))
logger.out('FQDN: {}'.format(myfqdn))
logger.out('Host: {}'.format(myhostname))
logger.out('ID: {}'.format(mynodeid))
logger.out('IPMI hostname: {}'.format(config['ipmi_hostname']))
logger.out('Machine details:')
logger.out(' CPUs: {}'.format(staticdata[0]))
logger.out(' Arch: {}'.format(staticdata[3]))
logger.out(' OS: {}'.format(staticdata[2]))
logger.out(' Kernel: {}'.format(staticdata[1]))
logger.out('')
logger.out('|--------------------------------------------------|')
logger.out('| ######## ## ## ###### |')
logger.out('| ## ## ## ## ## ## |')
logger.out('| ## ## ## ## ## |')
logger.out('| ######## ## ## ## |')
logger.out('| ## ## ## ## |')
logger.out('| ## ## ## ## ## |')
logger.out('| ## ### ###### |')
logger.out('|--------------------------------------------------|')
logger.out('| Parallel Virtual Cluster node daemon v{0: <10} |'.format(version))
logger.out('| FQDN: {0: <42} |'.format(myfqdn))
logger.out('| Host: {0: <42} |'.format(myhostname))
logger.out('| ID: {0: <44} |'.format(mynodeid))
logger.out('| IPMI hostname: {0: <33} |'.format(config['ipmi_hostname']))
logger.out('| Machine details: |')
logger.out('| CPUs: {0: <40} |'.format(staticdata[0]))
logger.out('| Arch: {0: <40} |'.format(staticdata[3]))
logger.out('| OS: {0: <42} |'.format(staticdata[2]))
logger.out('| Kernel: {0: <38} |'.format(staticdata[1]))
logger.out('|--------------------------------------------------|')
logger.out('')
logger.out('Starting pvcnoded on host {}'.format(myfqdn), state='s')
# Define some colours for future messages if applicable
@ -1142,6 +1155,7 @@ def collect_ceph_stats(queue):
# Assemble a useful data structure
pool_df = {
'id': pool['id'],
'stored_bytes': pool['stats']['stored'],
'free_bytes': pool['stats']['max_avail'],
'used_bytes': pool['stats']['bytes_used'],
'used_percent': pool['stats']['percent_used'],