Correct some bugs around new code

Joshua Boniface 2019-06-19 00:23:14 -04:00
parent 01959cb9e3
commit a4ab3075ab
4 changed files with 47 additions and 19 deletions

View File

@@ -1316,6 +1316,16 @@ def ceph_pool_list(limit):
     retcode, retmsg = pvc_ceph.get_list_pool(zk_conn, limit)
     cleanup(retcode, retmsg, zk_conn)
 
+###############################################################################
+# pvc ceph volume
+###############################################################################
+@click.group(name='volume', short_help='Manage RBD volumes in the PVC storage cluster.', context_settings=CONTEXT_SETTINGS)
+def ceph_volume():
+    """
+    Manage the Ceph RBD volumes of the PVC cluster.
+    """
+    pass
+
 ###############################################################################
 # pvc ceph volume add
 ###############################################################################
@@ -1381,6 +1391,16 @@ def ceph_volume_list(pool, limit):
     retcode, retmsg = pvc_ceph.get_list_volume(zk_conn, pool, limit)
     cleanup(retcode, retmsg, zk_conn)
 
+###############################################################################
+# pvc ceph volume snapshot
+###############################################################################
+@click.group(name='snapshot', short_help='Manage RBD volume snapshots in the PVC storage cluster.', context_settings=CONTEXT_SETTINGS)
+def ceph_volume_snapshot():
+    """
+    Manage the Ceph RBD volume snapshots of the PVC cluster.
+    """
+    pass
+
 ###############################################################################
 # pvc ceph volume snapshot add
 ###############################################################################
@@ -1394,7 +1414,7 @@ def ceph_volume_list(pool, limit):
 @click.argument(
     'name'
 )
-def ceph_snapshot_add(pool, volume, name):
+def ceph_volume_snapshot_add(pool, volume, name):
     """
     Add a snapshot of Ceph RBD volume VOLUME with name NAME.
     """
@@ -1416,7 +1436,7 @@ def ceph_snapshot_add(pool, volume, name):
 @click.argument(
     'name'
 )
-def ceph_volume_remove(pool, volume, name):
+def ceph_volume_snapshot_remove(pool, volume, name):
     """
     Remove a Ceph RBD volume with name NAME from pool POOL.
     """
@@ -1443,7 +1463,7 @@ def ceph_volume_remove(pool, volume, name):
 @click.argument(
     'limit', default=None, required=False
 )
-def ceph_volume_list(pool, volume, limit):
+def ceph_volume_snapshot_list(pool, volume, limit):
     """
     List all Ceph RBD volume snapshots, in the cluster or in pool POOL, for all volumes or volume VOLUME; optionally only match elements matching name regex LIMIT.
     """

View File

@@ -650,6 +650,8 @@ def init_zookeeper(zk_host):
     transaction.create('/ceph', ''.encode('ascii'))
     transaction.create('/ceph/osds', ''.encode('ascii'))
     transaction.create('/ceph/pools', ''.encode('ascii'))
+    transaction.create('/ceph/volumes', ''.encode('ascii'))
+    transaction.create('/ceph/snapshots', ''.encode('ascii'))
     transaction.create('/locks', ''.encode('ascii'))
     transaction.create('/locks/flush_lock', 'False'.encode('ascii'))
     transaction.commit()
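These two creates give volumes and snapshots their own top-level trees under /ceph, which the per-pool keys written by add_pool below require: a plain Zookeeper create fails with a no-node error when the parent path does not exist. A self-contained sketch of the same transactional pattern with kazoo; the host address is a placeholder:

from kazoo.client import KazooClient

zk_conn = KazooClient(hosts='127.0.0.1:2181')  # placeholder coordinator address
zk_conn.start()

# Queue all base keys in one transaction so initialization is all-or-nothing.
transaction = zk_conn.transaction()
for path in ('/ceph', '/ceph/volumes', '/ceph/snapshots'):
    transaction.create(path, ''.encode('ascii'))
results = transaction.commit()  # one result, or exception object, per queued op

zk_conn.stop()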

View File

@@ -1384,7 +1384,7 @@ def remove_snapshot(zk_conn, pool, volume, name):
         return False, 'ERROR: No snapshot with name "{}" is present of volume {} on pool {}.'.format(name, volume, pool)
 
     # Tell the cluster to create a new snapshot
-    remove_snapshot_string = 'snapshot_remove {},{},{}'.format(pool, volume name)
+    remove_snapshot_string = 'snapshot_remove {},{},{}'.format(pool, volume, name)
     zkhandler.writedata(zk_conn, {'/ceph/cmd': remove_snapshot_string})
     # Wait 1/2 second for the cluster to get the message and start working
     time.sleep(0.5)
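A one-character fix, but an important one: format(pool, volume name) is a SyntaxError, and in Python a syntax error breaks import of the whole module, not just this function. With the comma restored the command string renders as intended; the values below are hypothetical, purely to illustrate the fixed line:

pool, volume, name = 'vms', 'disk0', 'before-upgrade'  # illustrative values only
remove_snapshot_string = 'snapshot_remove {},{},{}'.format(pool, volume, name)
assert remove_snapshot_string == 'snapshot_remove vms,disk0,before-upgrade'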

View File

@@ -235,17 +235,13 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
             if not is_osd_up:
                 break
 
-        # 4. Delete OSD from ZK
-        logger.out('Deleting OSD disk with ID {} from Zookeeper'.format(osd_id), state='i')
-        zkhandler.deletekey(zk_conn, '/ceph/osds/{}'.format(osd_id))
-
-        # 5. Determine the block devices
+        # 4. Determine the block devices
         retcode, stdout, stderr = common.run_os_command('readlink /var/lib/ceph/osd/ceph-{}/block'.format(osd_id))
         vg_name = stdout.split('/')[-2] # e.g. /dev/ceph-<uuid>/osd-block-<uuid>
         retcode, stdout, stderr = common.run_os_command('vgs --separator , --noheadings -o pv_name {}'.format(vg_name))
         pv_block = stdout.strip()
 
-        # 6. Zap the volumes
+        # 5. Zap the volumes
         logger.out('Zapping OSD disk with ID {} on {}'.format(osd_id, pv_block), state='i')
         retcode, stdout, stderr = common.run_os_command('ceph-volume lvm zap --destroy {}'.format(pv_block))
         if retcode:
@@ -254,7 +250,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
             print(stderr)
             raise
 
-        # 7. Purge the OSD from Ceph
+        # 6. Purge the OSD from Ceph
         logger.out('Purging OSD disk with ID {}'.format(osd_id), state='i')
         retcode, stdout, stderr = common.run_os_command('ceph osd purge {} --yes-i-really-mean-it'.format(osd_id))
         if retcode:
@@ -263,6 +259,10 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
             print(stderr)
             raise
 
+        # 7. Delete OSD from ZK
+        logger.out('Deleting OSD disk with ID {} from Zookeeper'.format(osd_id), state='i')
+        zkhandler.deletekey(zk_conn, '/ceph/osds/{}'.format(osd_id))
+
         # Log it
         logger.out('Removed OSD disk with ID {}'.format(osd_id), state='o')
         return True
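The renumbering in this file is the substantive fix: the OSD's Zookeeper key used to be deleted at step 4, before the zap and purge, so a failure in either later step left an OSD that Ceph still knew about but PVC no longer tracked. Deleting the key as the new step 7 means PVC's record only disappears once Ceph itself has forgotten the OSD. A condensed sketch of the corrected ordering, with subprocess standing in for the daemon's common.run_os_command helper and an injected callable for zkhandler.deletekey:

import subprocess

def remove_osd_ordered(delete_key, osd_id, pv_block):
    # Destructive external steps come first; check=True raises on a non-zero
    # exit code and aborts before any metadata is touched.
    subprocess.run(['ceph-volume', 'lvm', 'zap', '--destroy', pv_block], check=True)
    subprocess.run(['ceph', 'osd', 'purge', str(osd_id), '--yes-i-really-mean-it'], check=True)
    # Only after Ceph has forgotten the OSD is PVC's record of it removed.
    delete_key('/ceph/osds/{}'.format(osd_id))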
@@ -414,6 +414,8 @@ def add_pool(zk_conn, logger, name, pgs):
             '/ceph/pools/{}'.format(name): '',
             '/ceph/pools/{}/pgs'.format(name): pgs,
             '/ceph/pools/{}/stats'.format(name): '{}',
+            '/ceph/volumes/{}'.format(name): '',
+            '/ceph/snapshots/{}'.format(name): '',
         })
 
         # Log it
@@ -428,9 +430,6 @@ def remove_pool(zk_conn, logger, name):
     # We are ready to create a new pool on this node
     logger.out('Removing RBD pool {}'.format(name), state='i')
     try:
-        # Delete pool from ZK
-        zkhandler.deletekey(zk_conn, '/ceph/pools/{}'.format(name))
-
         # Remove the pool
         retcode, stdout, stderr = common.run_os_command('ceph osd pool rm {pool} {pool} --yes-i-really-really-mean-it'.format(pool=name))
         if retcode:
@@ -439,6 +438,11 @@ def remove_pool(zk_conn, logger, name):
             print(stderr)
             raise
 
+        # Delete pool from ZK
+        zkhandler.deletekey(zk_conn, '/ceph/pools/{}'.format(name))
+        zkhandler.deletekey(zk_conn, '/ceph/volumes/{}'.format(name))
+        zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}'.format(name))
+
         # Log it
         logger.out('Removed RBD pool {}'.format(name), state='o')
         return True
@@ -490,7 +494,7 @@ def add_volume(zk_conn, logger, pool, name, size):
     # We are ready to create a new volume on this node
     logger.out('Creating new RBD volume {} on pool {}'.format(name, pool), state='i')
     try:
-        # 1. Create the volume
+        # Create the volume
         sizeMiB = size * 1024
         retcode, stdout, stderr = common.run_os_command('rbd create --size {} {}/{}'.format(sizeMiB, pool, name))
         if retcode:
@@ -499,11 +503,12 @@ def add_volume(zk_conn, logger, pool, name, size):
             print(stderr)
             raise
 
-        # 2. Add the new volume to ZK
+        # Add the new volume to ZK
         zkhandler.writedata(zk_conn, {
             '/ceph/volumes/{}/{}'.format(pool, name): '',
             '/ceph/volumes/{}/{}/size'.format(pool, name): size,
             '/ceph/volumes/{}/{}/stats'.format(pool, name): '{}',
+            '/ceph/snapshots/{}/{}'.format(pool, name): '',
         })
 
         # Log it
@@ -518,9 +523,6 @@ def remove_volume(zk_conn, logger, pool, name):
     # We are ready to create a new volume on this node
     logger.out('Removing RBD volume {} from pool {}'.format(name, pool), state='i')
     try:
-        # Delete volume from ZK
-        zkhandler.deletekey(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name))
-
         # Remove the volume
         retcode, stdout, stderr = common.run_os_command('rbd rm {}/{}'.format(pool, name))
         if retcode:
@@ -529,6 +531,10 @@ def remove_volume(zk_conn, logger, pool, name):
             print(stderr)
             raise
 
+        # Delete volume from ZK
+        zkhandler.deletekey(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name))
+        zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name))
+
         # Log it
         logger.out('Removed RBD volume {} from pool {}'.format(name, pool), state='o')
         return True
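remove_pool and remove_volume receive the same ordering treatment as remove_osd: the Zookeeper deletions now follow the ceph osd pool rm and rbd rm commands instead of preceding them, and each teardown also covers the volume and snapshot trees introduced earlier in this commit. Collecting the paths from the hunks, the key layout per object looks like this (pool and volume names are hypothetical):

pool, volume = 'vms', 'disk0'  # illustrative names only

# Written by add_pool; removed by remove_pool once 'ceph osd pool rm' succeeds.
pool_keys = [
    '/ceph/pools/{}'.format(pool),      # pool record, with pgs and stats children
    '/ceph/volumes/{}'.format(pool),    # parent for this pool's volume records
    '/ceph/snapshots/{}'.format(pool),  # parent for this pool's snapshot records
]

# Written by add_volume; removed by remove_volume once 'rbd rm' succeeds.
volume_keys = [
    '/ceph/volumes/{}/{}'.format(pool, volume),    # volume record, with size and stats children
    '/ceph/snapshots/{}/{}'.format(pool, volume),  # parent for this volume's snapshot records
]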