parent eeb8879f73
commit 7ace5b5056
@@ -205,6 +205,8 @@ def getOutputColoursOSD(osd_information):
 
     return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour
 
+# OSD addition and removal uses the /cmd/ceph pipe
+# These actions must occur on the specific node they reference
 def add_osd(zk_conn, node, device, weight):
     # Verify the target node exists
     if not common.verifyNode(zk_conn, node):
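Note: the two comment lines added above document the one case that keeps the old design. OSD addition and removal still travel over the /cmd/ceph Zookeeper pipe, because those operations must run on the node that physically holds the disk; everything else in this commit moves to direct command execution in the client library. As orientation for the hunks below, here is a minimal sketch of the client's side of that pipe round-trip, modelled on the code being deleted; send_ceph_command is a hypothetical helper for illustration, not a function in this commit, and zkhandler is PVC's own Zookeeper wrapper:

    import time

    def send_ceph_command(zk_conn, command_string):
        # Post the command (e.g. 'osd_in 5') to the pipe for a node daemon to consume
        zkhandler.writedata(zk_conn, {'/cmd/ceph': command_string})
        # Wait 1/2 second for the cluster to get the message and start working
        time.sleep(0.5)
        # Acquire a read lock, so we get the return exclusively
        with zkhandler.readlock(zk_conn, '/cmd/ceph'):
            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
        # The node daemon reports 'success-<command>' or 'failure-<command>'
        success = (result == 'success-{}'.format(command_string.split()[0]))
        # Acquire a write lock and clear the pipe so the next command starts clean
        with zkhandler.writelock(zk_conn, '/cmd/ceph'):
            time.sleep(0.5)
            zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
        return success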
@@ -279,118 +281,35 @@ def in_osd(zk_conn, osd_id):
     if not verifyOSD(zk_conn, osd_id):
         return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)
 
-    # Tell the cluster to online an OSD
-    in_osd_string = 'osd_in {}'.format(osd_id)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': in_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_in':
-                message = 'Set OSD {} online in the cluster.'.format(osd_id)
-                success = True
-            else:
-                message = 'ERROR: Failed to set OSD online; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # Tell the cluster to online an OSD
+    retcode, stdout, stderr = common.run_os_command('ceph osd in {}'.format(osd_id))
+    if retcode:
+        return False, 'ERROR: Failed to enable OSD {}: {}'.format(osd_id, stderr)
+
+    return True, 'Set OSD {} online.'.format(osd_id)
 
 def out_osd(zk_conn, osd_id):
     if not verifyOSD(zk_conn, osd_id):
         return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)
 
-    # Tell the cluster to offline an OSD
-    out_osd_string = 'osd_out {}'.format(osd_id)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': out_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_out':
-                message = 'Set OSD {} offline in the cluster.'.format(osd_id)
-                success = True
-            else:
-                message = 'ERROR: Failed to set OSD offline; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # Tell the cluster to offline an OSD
+    retcode, stdout, stderr = common.run_os_command('ceph osd out {}'.format(osd_id))
+    if retcode:
+        return False, 'ERROR: Failed to disable OSD {}: {}'.format(osd_id, stderr)
+
+    return True, 'Set OSD {} offline.'.format(osd_id)
 
 def set_osd(zk_conn, option):
-    # Tell the cluster to set an OSD property
-    set_osd_string = 'osd_set {}'.format(option)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': set_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_set':
-                message = 'Set OSD property {} on the cluster.'.format(option)
-                success = True
-            else:
-                message = 'ERROR: Failed to set OSD property; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
-
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-    return success, message
+    # Tell the cluster to set an OSD property
+    retcode, stdout, stderr = common.run_os_command('ceph osd set {}'.format(option))
+    if retcode:
+        return False, 'ERROR: Failed to set property "{}": {}'.format(option, stderr)
+
+    return True, 'Set OSD property "{}".'.format(option)
 
 def unset_osd(zk_conn, option):
-    # Tell the cluster to unset an OSD property
-    unset_osd_string = 'osd_unset {}'.format(option)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': unset_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_unset':
-                message = 'Unset OSD property {} on the cluster.'.format(option)
-                success = True
-            else:
-                message = 'ERROR: Failed to unset OSD property; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # Tell the cluster to unset an OSD property
+    retcode, stdout, stderr = common.run_os_command('ceph osd unset {}'.format(option))
+    if retcode:
+        return False, 'ERROR: Failed to unset property "{}": {}'.format(option, stderr)
+
+    return True, 'Unset OSD property "{}".'.format(option)
 
 def get_list_osd(zk_conn, limit, is_fuzzy=True):
     osd_list = []
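Note: the pattern replacing the pipe in these four functions is uniform. common.run_os_command returns a (retcode, stdout, stderr) triple, a truthy retcode means the command failed, and every client function now returns a (success, message) tuple directly instead of assembling success/message under locks. A hypothetical caller, with the OSD ID invented for illustration:

    success, message = in_osd(zk_conn, '5')
    if not success:
        print(message)  # e.g. 'ERROR: Failed to enable OSD 5: <stderr from ceph>'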
@@ -664,65 +583,66 @@ def getPoolInformation(zk_conn, pool):
     return pool_information
 
 def add_pool(zk_conn, name, pgs, replcfg):
-    # Tell the cluster to create a new pool
-    add_pool_string = 'pool_add {},{},{}'.format(name, pgs, replcfg)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': add_pool_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-pool_add':
-                message = 'Created new RBD pool "{}" with "{}" PGs and replication configuration {}.'.format(name, pgs, replcfg)
-                success = True
-            else:
-                message = 'ERROR: Failed to create new pool; check node logs for details.'
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # Prepare the copies/mincopies variables
+    try:
+        copies, mincopies = replcfg.split(',')
+        copies = int(copies.replace('copies=', ''))
+        mincopies = int(mincopies.replace('mincopies=', ''))
+    except:
+        copies = None
+        mincopies = None
+    if not copies or not mincopies:
+        return False, 'ERROR: Replication configuration "{}" is not valid.'.format(replcfg)
+
+    # 1. Create the pool
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool create {} {} replicated'.format(name, pgs))
+    if retcode:
+        return False, 'ERROR: Failed to create pool "{}" with {} PGs: {}'.format(name, pgs, stderr)
+
+    # 2. Set the size and minsize
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} size {}'.format(name, copies))
+    if retcode:
+        return False, 'ERROR: Failed to set pool "{}" size of {}: {}'.format(name, copies, stderr)
+
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} min_size {}'.format(name, mincopies))
+    if retcode:
+        return False, 'ERROR: Failed to set pool "{}" minimum size of {}: {}'.format(name, mincopies, stderr)
+
+    # 3. Enable RBD application
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool application enable {} rbd'.format(name))
+    if retcode:
+        return False, 'ERROR: Failed to enable RBD application on pool "{}" : {}'.format(name, stderr)
+
+    # 4. Add the new pool to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/pools/{}'.format(name): '',
+        '/ceph/pools/{}/pgs'.format(name): pgs,
+        '/ceph/pools/{}/stats'.format(name): '{}',
+        '/ceph/volumes/{}'.format(name): '',
+        '/ceph/snapshots/{}'.format(name): '',
+    })
+
+    return True, 'Created RBD pool "{}" with {} PGs'.format(name, pgs)
 
 def remove_pool(zk_conn, name):
     if not verifyPool(zk_conn, name):
         return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(name)
 
-    # Tell the cluster to create a new pool
-    remove_pool_string = 'pool_remove {}'.format(name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_pool_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-pool_remove':
-                message = 'Removed RBD pool "{}" and all volumes.'.format(name)
-                success = True
-            else:
-                message = 'ERROR: Failed to remove pool; check node logs for details.'
-                success = False
-        except Exception as e:
-            message = 'ERROR: Command ignored by node: {}'.format(e)
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # 1. Remove pool volumes
+    for volume in zkhandler.listchildren(zk_conn, '/ceph/volumes/{}'.format(name)):
+        remove_volume(zk_conn, logger, name, volume)
+
+    # 2. Remove the pool
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool rm {pool} {pool} --yes-i-really-really-mean-it'.format(pool=name))
+    if retcode:
+        return False, 'ERROR: Failed to remove pool "{}": {}'.format(name, stderr)
+
+    # 3. Delete pool from Zookeeper
+    zkhandler.deletekey(zk_conn, '/ceph/pools/{}'.format(name))
+    zkhandler.deletekey(zk_conn, '/ceph/volumes/{}'.format(name))
+    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}'.format(name))
+
+    return True, 'Removed RBD pool "{}" and all volumes.'.format(name)
 
 def get_list_pool(zk_conn, limit, is_fuzzy=True):
     pool_list = []
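Note: add_pool now parses the replication configuration client-side before touching the cluster. The replcfg string uses the form copies=X,mincopies=Y; a worked example of the parse it performs:

    replcfg = 'copies=3,mincopies=2'
    copies, mincopies = replcfg.split(',')                # ['copies=3', 'mincopies=2']
    copies = int(copies.replace('copies=', ''))           # 3 -> 'ceph osd pool set <name> size 3'
    mincopies = int(mincopies.replace('mincopies=', ''))  # 2 -> 'ceph osd pool set <name> min_size 2'

Note also that the new remove_pool calls remove_volume(zk_conn, logger, name, volume), but no logger is in scope in this client-side function and the client remove_volume shown in the next hunk takes (zk_conn, pool, name); this appears to be carried over from the daemon version.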
@@ -967,154 +887,112 @@ def getVolumeInformation(zk_conn, pool, volume):
     return volume_information
 
 def add_volume(zk_conn, pool, name, size):
-    # Tell the cluster to create a new volume
-    databytes = format_bytes_fromhuman(size)
-    add_volume_string = 'volume_add {},{},{}'.format(pool, name, databytes)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': add_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_add':
-                message = 'Created new RBD volume "{}" of size "{}" on pool "{}".'.format(name, size, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to create new volume; check node logs for details.'
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # 1. Create the volume
+    retcode, stdout, stderr = common.run_os_command('rbd create --size {} --image-feature layering,exclusive-lock {}/{}'.format(size, pool, name))
+    if retcode:
+        return False, 'ERROR: Failed to create RBD volume "{}": {}'.format(name, stderr)
+
+    # 2. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name))
+    volstats = stdout
+
+    # 3. Add the new volume to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name): '',
+        '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
+        '/ceph/snapshots/{}/{}'.format(pool, name): '',
+    })
+
+    return True, 'Created RBD volume "{}/{}" ({})'.format(pool, name, size)
+
+def clone_volume(zk_conn, pool, name_src, name_new):
+    if not verifyVolume(zk_conn, pool, name_src):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name_src, pool)
+
+    # 1. Clone the volume
+    retcode, stdout, stderr = common.run_os_command('rbd copy {}/{} {}/{}'.format(pool, name_src, pool, name_new))
+    if retcode:
+        return False, 'ERROR: Failed to clone RBD volume "{}" to "{}" in pool "{}": {}'.format(name_src, new_name, pool, stderr)
+
+    # 2. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name_new))
+    volstats = stdout
+
+    # 3. Add the new volume to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name_new): '',
+        '/ceph/volumes/{}/{}/stats'.format(pool, name_new): volstats,
+        '/ceph/snapshots/{}/{}'.format(pool, name_new): '',
+    })
+
+    return True, 'Cloned RBD volume "{}" to "{}" in pool "{}"'.format(name, name_new, pool)
 
 def resize_volume(zk_conn, pool, name, size):
-    # Tell the cluster to resize the volume
-    databytes = format_bytes_fromhuman(size)
-    resize_volume_string = 'volume_resize {},{},{}'.format(pool, name, databytes)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': resize_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_resize':
-                message = 'Resized RBD volume "{}" to size "{}" on pool "{}".'.format(name, size, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to resize volume; check node logs for details.'
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    if not verifyVolume(zk_conn, pool, name):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
+
+    # 1. Resize the volume
+    retcode, stdout, stderr = common.run_os_command('rbd resize --size {} {}/{}'.format(size, pool, name))
+    if retcode:
+        return False, 'ERROR: Failed to resize RBD volume "{}" to size "{}" in pool "{}": {}'.format(name, size, pool, stderr)
+
+    # 2. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name))
+    volstats = stdout
+
+    # 3. Add the new volume to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name): '',
+        '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
+        '/ceph/snapshots/{}/{}'.format(pool, name): '',
+    })
+
+    return True, 'Resized RBD volume "{}" to size "{}" in pool "{}".'.format(name, size, pool)
 
 def rename_volume(zk_conn, pool, name, new_name):
-    # Tell the cluster to rename
-    rename_volume_string = 'volume_rename {},{},{}'.format(pool, name, new_name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': rename_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_rename':
-                message = 'Renamed RBD volume "{}" to "{}" on pool "{}".'.format(name, new_name, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to rename volume {} to {}; check node logs for details.'.format(name, new_name)
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
-
-def clone_volume(zk_conn, pool, name, new_name):
-    # Tell the cluster to clone
-    clone_volume_string = 'volume_clone {},{},{}'.format(pool, name, new_name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': clone_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_clone':
-                message = 'Cloned RBD volume "{}" to "{}" on pool "{}".'.format(name, new_name, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to clone volume {} to {}; check node logs for details.'.format(name, new_name)
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    if not verifyVolume(zk_conn, pool, name):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
+
+    # 1. Rename the volume
+    retcode, stdout, stderr = common.run_os_command('rbd rename {}/{} {}'.format(pool, name, new_name))
+    if retcode:
+        return False, 'ERROR: Failed to rename volume "{}" to "{}" in pool "{}": {}'.format(name, new_name, pool, stderr)
+
+    # 2. Rename the volume in Zookeeper
+    zkhandler.renamekey(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name): '/ceph/volumes/{}/{}'.format(pool, new_name),
+        '/ceph/snapshots/{}/{}'.format(pool, name): '/ceph/snapshots/{}/{}'.format(pool, new_name),
+    })
+
+    # 3. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, new_name))
+    volstats = stdout
+
+    # 4. Update the volume stats in Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}/stats'.format(pool, new_name): volstats,
+    })
+
+    return True, 'Renamed RBD volume "{}" to "{}" in pool "{}".'.format(name, new_name, pool)
 
 def remove_volume(zk_conn, pool, name):
     if not verifyVolume(zk_conn, pool, name):
-        return False, 'ERROR: No volume with name "{}" is present in pool {}.'.format(name, pool)
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
 
-    # Tell the cluster to create a new volume
-    remove_volume_string = 'volume_remove {},{}'.format(pool, name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_remove':
-                message = 'Removed RBD volume "{}" in pool "{}".'.format(name, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to remove volume; check node logs for details.'
-                success = False
-        except Exception as e:
-            message = 'ERROR: Command ignored by node: {}'.format(e)
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # 1. Remove volume snapshots
+    for snapshot in zkhandler.listchildren(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name)):
+        remove_snapshot(zk_conn, logger, pool, volume, snapshot)
+
+    # 2. Remove the volume
+    retcode, stdout, stderr = common.run_os_command('rbd rm {}/{}'.format(pool, name))
+    if retcode:
+        return False, 'ERROR: Failed to remove RBD volume "{}" in pool "{}": {}'.format(name, pool, stderr)
+
+    # 3. Delete volume from Zookeeper
+    zkhandler.deletekey(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name))
+    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name))
+
+    return True, 'Removed RBD volume "{}" in pool "{}".'.format(name, pool)
 
 def get_list_volume(zk_conn, pool, limit, is_fuzzy=True):
     volume_list = []
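Note: volume stats are stored in Zookeeper as the raw JSON that 'rbd info --format json' prints, and the daemon-side watcher classes (see the context lines in the later hunks) parse them back with json.loads. A hypothetical read-back, with the pool and volume names invented for illustration and the field names taken from rbd's JSON output:

    import json

    volstats = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format('vms', 'disk0'))
    stats = json.loads(volstats)
    print(stats['size'], stats['objects'])  # fields as emitted by 'rbd info --format json'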
@@ -1276,94 +1154,55 @@ def getCephSnapshots(zk_conn, pool, volume):
     return snapshot_list
 
 def add_snapshot(zk_conn, pool, volume, name):
-    # Tell the cluster to create a new snapshot
-    add_snapshot_string = 'snapshot_add {},{},{}'.format(pool, volume, name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': add_snapshot_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-snapshot_add':
-                message = 'Created new RBD snapshot "{}" of volume "{}" on pool "{}".'.format(name, volume, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to create new snapshot; check node logs for details.'
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    if not verifyVolume(zk_conn, pool, volume):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
+
+    # 1. Create the snapshot
+    retcode, stdout, stderr = common.run_os_command('rbd snap create {}/{}@{}'.format(pool, volume, name))
+    if retcode:
+        return False, 'ERROR: Failed to create RBD snapshot "{}" of volume "{}" in pool "{}": {}'.format(name, volume, pool, stderr)
+
+    # 2. Add the snapshot to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '',
+        '/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
+    })
+
+    return True, 'Created RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
 
 def rename_snapshot(zk_conn, pool, volume, name, new_name):
-    # Tell the cluster to rename
-    rename_snapshot_string = 'snapshot_rename {},{},{}'.format(pool, name, new_name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': rename_snapshot_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-snapshot_rename':
-                message = 'Renamed RBD volume snapshot "{}" to "{}" for volume {} on pool "{}".'.format(name, new_name, volume, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to rename volume {} to {}; check node logs for details.'.format(name, new_name)
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    if not verifyVolume(zk_conn, pool, volume):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
+    if not verifySnapshot(zk_conn, pool, volume, name):
+        return False, 'ERROR: No snapshot with name "{}" is present for volume "{}" in pool "{}".'.format(name, volume, pool)
+
+    # 1. Rename the snapshot
+    retcode, stdout, stderr = common.run_os_command('rbd snap rename {}/{}@{} {}'.format(pool, volume, name, new_name))
+    if retcode:
+        return False, 'ERROR: Failed to rename RBD snapshot "{}" to "{}" for volume "{}" in pool "{}": {}'.format(name, new_name, volume, pool, stderr)
+
+    # 2. Rename the snapshot in ZK
+    zkhandler.renamekey(zk_conn, {
+        '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '/ceph/snapshots/{}/{}/{}'.format(pool, volume, new_name)
+    })
+
+    return True, 'Renamed RBD snapshot "{}" to "{}" for volume "{}" in pool "{}".'.format(name, new_name, volume, pool)
 
 def remove_snapshot(zk_conn, pool, volume, name):
+    if not verifyVolume(zk_conn, pool, volume):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
     if not verifySnapshot(zk_conn, pool, volume, name):
-        return False, 'ERROR: No snapshot with name "{}" is present of volume {} on pool {}.'.format(name, volume, pool)
+        return False, 'ERROR: No snapshot with name "{}" is present of volume {} in pool {}.'.format(name, volume, pool)
 
-    # Tell the cluster to create a new snapshot
-    remove_snapshot_string = 'snapshot_remove {},{},{}'.format(pool, volume, name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_snapshot_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-snapshot_remove':
-                message = 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to remove snapshot; check node logs for details.'
-                success = False
-        except Exception as e:
-            message = 'ERROR: Command ignored by node: {}'.format(e)
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    # 1. Remove the snapshot
+    retcode, stdout, stderr = common.run_os_command('rbd snap rm {}/{}@{}'.format(pool, volume, name))
+    if retcode:
+        return False, 'Failed to remove RBD snapshot "{}" of volume "{}" in pool "{}": {}'.format(name, volume, pool, stderr)
+
+    # 2. Delete snapshot from Zookeeper
+    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
+
+    return True, 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
 
 def get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy=True):
     snapshot_list = []
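Note: the snapshot functions address snapshots with the standard RBD pool/volume@snapshot spec, and the same (success, message) contract applies. A hypothetical round-trip using the new client functions, with pool, volume, and snapshot names invented for illustration:

    ok, msg = add_snapshot(zk_conn, 'vms', 'disk0', 'backup1')  # rbd snap create vms/disk0@backup1
    if ok:
        ok, msg = rename_snapshot(zk_conn, 'vms', 'disk0', 'backup1', 'backup2')
    print(msg)

The remaining hunks move to the node-daemon side of the same module, where the per-node implementations and most of the /cmd/ceph dispatcher are removed.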
@@ -283,86 +283,6 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
         logger.out('Failed to purge OSD disk with ID {}: {}'.format(osd_id, e), state='e')
         return False
 
-def in_osd(zk_conn, logger, osd_id):
-    # We are ready to create a new pool on this node
-    logger.out('Setting OSD {} in'.format(osd_id), state='i')
-    try:
-        # 1. Set it in
-        retcode, stdout, stderr = common.run_os_command('ceph osd in {}'.format(osd_id))
-        if retcode:
-            print('ceph osd in')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Log it
-        logger.out('Set OSD {} in'.format(osd_id), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to set OSD {} in: {}'.format(osd_id, e), state='e')
-        return False
-
-def out_osd(zk_conn, logger, osd_id):
-    # We are ready to create a new pool on this node
-    logger.out('Settoutg OSD {} out'.format(osd_id), state='i')
-    try:
-        # 1. Set it out
-        retcode, stdout, stderr = common.run_os_command('ceph osd out {}'.format(osd_id))
-        if retcode:
-            proutt('ceph osd out')
-            proutt(stdout)
-            proutt(stderr)
-            raise
-
-        # Log it
-        logger.out('Set OSD {} out'.format(osd_id), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to set OSD {} out: {}'.format(osd_id, e), state='e')
-        return False
-
-def set_property(zk_conn, logger, option):
-    # We are ready to create a new pool on this node
-    logger.out('Setting OSD property {}'.format(option), state='i')
-    try:
-        # 1. Set it in
-        retcode, stdout, stderr = common.run_os_command('ceph osd set {}'.format(option))
-        if retcode:
-            prsett('ceph osd set')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Log it
-        logger.out('Set OSD property {}'.format(option), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to set OSD property {}: {}'.format(option, e), state='e')
-        return False
-
-def unset_property(zk_conn, logger, option):
-    # We are ready to create a new pool on this node
-    logger.out('Unsetting OSD property {}'.format(option), state='i')
-    try:
-        # 1. Set it in
-        retcode, stdout, stderr = common.run_os_command('ceph osd unset {}'.format(option))
-        if retcode:
-            prunsett('ceph osd unset')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Log it
-        logger.out('Unset OSD property {}'.format(option), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to unset OSD property {}: {}'.format(option, e), state='e')
-        return False
-
 class CephPoolInstance(object):
     def __init__(self, zk_conn, this_node, name):
         self.zk_conn = zk_conn
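Note: the four daemon functions deleted above still carry the copy-paste artifacts of their creation (Settoutg, proutt, prsett, prunsett, evidently from mechanical in/out/set/unset renames); they printed diagnostics and raised a bare exception on failure, where the client-library replacements in the earlier hunks return a (False, message) tuple instead. A hypothetical helper showing the new convention in isolation; run_checked is invented for illustration:

    def run_checked(cmd):
        # Return (success, detail) instead of print-and-raise
        retcode, stdout, stderr = common.run_os_command(cmd)
        if retcode:
            return False, stderr
        return True, stdout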
@@ -401,86 +321,6 @@ class CephPoolInstance(object):
             if data and data != self.stats:
                 self.stats = json.loads(data)
 
-def add_pool(zk_conn, logger, name, pgs, copies, mincopies):
-    # We are ready to create a new pool on this node
-    logger.out('Creating new RBD pool {}'.format(name), state='i')
-    try:
-        # 1. Create the pool
-        retcode, stdout, stderr = common.run_os_command('ceph osd pool create {} {} replicated'.format(name, pgs))
-        if retcode:
-            print('ceph osd pool create')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # 2. Set the size and min_size
-        retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} size {}'.format(name, copies))
-        if retcode:
-            print('ceph osd pool set size')
-            print(stdout)
-            print(stderr)
-            raise
-        retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} min_size {}'.format(name, mincopies))
-        if retcode:
-            print('ceph osd pool set min_size')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # 3. Enable RBD application
-        retcode, stdout, stderr = common.run_os_command('ceph osd pool application enable {} rbd'.format(name))
-        if retcode:
-            print('ceph osd pool application enable')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # 4. Add the new pool to ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/pools/{}'.format(name): '',
-            '/ceph/pools/{}/pgs'.format(name): pgs,
-            '/ceph/pools/{}/stats'.format(name): '{}',
-            '/ceph/volumes/{}'.format(name): '',
-            '/ceph/snapshots/{}'.format(name): '',
-        })
-
-        # Log it
-        logger.out('Created new RBD pool {}'.format(name), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to create new RBD pool {}: {}'.format(name, e), state='e')
-        return False
-
-def remove_pool(zk_conn, logger, name):
-    # We are ready to create a new pool on this node
-    logger.out('Removing RBD pool {}'.format(name), state='i')
-    try:
-        # Remove pool volumes first
-        for volume in zkhandler.listchildren(zk_conn, '/ceph/volumes/{}'.format(name)):
-            remove_volume(zk_conn, logger, name, volume)
-
-        # Remove the pool
-        retcode, stdout, stderr = common.run_os_command('ceph osd pool rm {pool} {pool} --yes-i-really-really-mean-it'.format(pool=name))
-        if retcode:
-            print('ceph osd pool rm')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Delete pool from ZK
-        zkhandler.deletekey(zk_conn, '/ceph/pools/{}'.format(name))
-        zkhandler.deletekey(zk_conn, '/ceph/volumes/{}'.format(name))
-        zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}'.format(name))
-
-        # Log it
-        logger.out('Removed RBD pool {}'.format(name), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to remove RBD pool {}: {}'.format(name, e), state='e')
-        return False
-
 class CephVolumeInstance(object):
     def __init__(self, zk_conn, this_node, pool, name):
         self.zk_conn = zk_conn
@@ -504,158 +344,6 @@ class CephVolumeInstance(object):
             if data and data != self.stats:
                 self.stats = json.loads(data)
 
-def add_volume(zk_conn, logger, pool, name, size):
-    # We are ready to create a new volume on this node
-    logger.out('Creating new RBD volume {} on pool {} of size {}'.format(name, pool, size), state='i')
-    try:
-        # Create the volume
-        retcode, stdout, stderr = common.run_os_command('rbd create --size {} --image-feature layering,exclusive-lock {}/{}'.format(size, pool, name))
-        if retcode:
-            print('rbd create')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Get volume stats
-        retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name))
-        volstats = stdout
-
-        # Add the new volume to ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/volumes/{}/{}'.format(pool, name): '',
-            '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
-            '/ceph/snapshots/{}/{}'.format(pool, name): '',
-        })
-
-        # Log it
-        logger.out('Created new RBD volume {} on pool {}'.format(name, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to create new RBD volume {} on pool {}: {}'.format(name, pool, e), state='e')
-        return False
-
-def clone_volume(zk_conn, logger, pool, name_orig, name_new, prefix):
-    if not prefix:
-        # Ensure that prefix is just an empty string if it isn't set
-        prefix = ''
-    logger.out('Cloning RBD volume {} to {}{} on pool {}'.format(name_orig, prefix, name_new, pool), state='i')
-    try:
-        # Clone the volume
-        retcode, stdout, stderr = common.run_os_command('rbd copy {}/{} {}/{}{}'.format(pool, name_orig, pool, prefix, name_new))
-        if retcode:
-            print('rbd copy')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Get volume stats
-        retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}{}'.format(pool, prefix, name_new))
-        volstats = stdout
-
-        # Add the new volume to ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/volumes/{}/{}{}'.format(pool, prefix, name_new): '',
-            '/ceph/volumes/{}/{}{}/stats'.format(pool, prefix, name_new): volstats,
-            '/ceph/snapshots/{}/{}{}'.format(pool, prefix, name_new): '',
-        })
-
-        # Log it
-        logger.out('Cloned RBD volume {} to {}{} on pool {}'.format(name_orig, prefix, name_new, pool), stats='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to clone RBD volume {} to {}{} on pool {}: {}'.format(name_orig, prefix, name_new, pool, e), state='e')
-        return False
-
-def resize_volume(zk_conn, logger, pool, name, size):
-    logger.out('Resizing RBD volume {} on pool {} to size {}'.format(name, pool, size), state='i')
-    try:
-        # Resize the volume
-        retcode, stdout, stderr = common.run_os_command('rbd resize --size {} {}/{}'.format(size, pool, name))
-        if retcode:
-            print('rbd resize')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Get volume stats
-        retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name))
-        volstats = stdout
-
-        # Update the volume to ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/volumes/{}/{}'.format(pool, name): '',
-            '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
-            '/ceph/snapshots/{}/{}'.format(pool, name): '',
-        })
-
-        # Log it
-        logger.out('Created new RBD volume {} on pool {}'.format(name, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to resize RBD volume {} on pool {}: {}'.format(name, pool, e), state='e')
-        return False
-
-def rename_volume(zk_conn, logger, pool, name, new_name):
-    logger.out('Renaming RBD volume {} to {} on pool {}'.format(name, new_name, pool))
-    try:
-        # Rename the volume
-        retcode, stdout, stderr = common.run_os_command('rbd rename {}/{} {}'.format(pool, name, new_name))
-        if retcode:
-            print('rbd rename')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Rename the volume in ZK
-        zkhandler.renamekey(zk_conn, {
-            '/ceph/volumes/{}/{}'.format(pool, name): '/ceph/volumes/{}/{}'.format(pool, new_name),
-            '/ceph/snapshots/{}/{}'.format(pool, name): '/ceph/snapshots/{}/{}'.format(pool, new_name),
-        })
-
-        # Get volume stats
-        retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, new_name))
-        volstats = stdout
-
-        # Update the volume stats in ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/volumes/{}/{}/stats'.format(pool, new_name): volstats,
-        })
-
-        # Log it
-        logger.out('Renamed RBD volume {} to {} on pool {}'.format(name, new_name, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to rename RBD volume {} on pool {}: {}'.format(name, pool, e), state='e')
-        return False
-
-def remove_volume(zk_conn, logger, pool, name):
-    # We are ready to create a new volume on this node
-    logger.out('Removing RBD volume {} from pool {}'.format(name, pool), state='i')
-    try:
-        # Remove the volume
-        retcode, stdout, stderr = common.run_os_command('rbd rm {}/{}'.format(pool, name))
-        if retcode:
-            print('ceph osd volume rm')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Delete volume from ZK
-        zkhandler.deletekey(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name))
-        zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name))
-
-        # Log it
-        logger.out('Removed RBD volume {} from pool {}'.format(name, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to remove RBD volume {} from pool {}: {}'.format(name, pool, e), state='e')
-        return False
-
 class CephSnapshotInstance(object):
     def __init__(self, zk_conn, this_node, name):
         self.zk_conn = zk_conn
@@ -680,85 +368,8 @@ class CephSnapshotInstance(object):
             if data and data != self.stats:
                 self.stats = json.loads(data)
 
-def add_snapshot(zk_conn, logger, pool, volume, name):
-    # We are ready to create a new snapshot on this node
-    logger.out('Creating new RBD snapshot {} of volume {} on pool {}'.format(name, volume, pool), state='i')
-    try:
-        # 1. Create the snapshot
-        retcode, stdout, stderr = common.run_os_command('rbd snap create {}/{}@{}'.format(pool, volume, name))
-        if retcode:
-            print('rbd snap create')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # 2. Add the new snapshot to ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '',
-            '/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
-        })
-
-        # Log it
-        logger.out('Created new RBD snapshot {} of volume {} on pool {}'.format(name, volume, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to create new RBD snapshot {} of volume {} on pool {}: {}'.format(name, volume, pool, e), state='e')
-        return False
-
-def rename_snapshot(zk_conn, logger, pool, volume, name, new_name):
-    logger.out('Renaming RBD volume snapshot {} to {} for volume {} on pool {}'.format(name, new_name, volume, pool))
-    try:
-        # Rename the volume
-        retcode, stdout, stderr = common.run_os_command('rbd snap rename {}/{}@{} {}'.format(pool, volume, name, new_name))
-        if retcode:
-            print('rbd snap rename')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Rename the snapshot in ZK
-        zkhandler.renamekey(zk_conn, {
-            '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '/ceph/snapshots/{}/{}/{}'.format(pool, volume, new_name)
-        })
-
-        # Update the snapshot stats in ZK
-        zkhandler.writedata(zk_conn, {
-            '/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, new_name): '{}',
-        })
-
-        # Log it
-        logger.out('Renamed RBD volume snapshot {} to {} for volume {} on pool {}'.format(name, new_name, volume, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to rename RBD volume snapshot {} for volume {} on pool {}: {}'.format(name, volume, pool, e), state='e')
-        return False
-
-def remove_snapshot(zk_conn, logger, pool, volume, name):
-    # We are ready to create a new snapshot on this node
-    logger.out('Removing RBD snapshot {} of volume {} on pool {}'.format(name, volume, pool), state='i')
-    try:
-        # Delete snapshot from ZK
-        zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
-
-        # Remove the snapshot
-        retcode, stdout, stderr = common.run_os_command('rbd snap rm {}/{}@{}'.format(pool, volume, name))
-        if retcode:
-            print('rbd snap rm')
-            print(stdout)
-            print(stderr)
-            raise
-
-        # Log it
-        logger.out('Removed RBD snapshot {} of volume {} on pool {}'.format(name, volume, pool), state='o')
-        return True
-    except Exception as e:
-        # Log it
-        logger.out('Failed to remove RBD snapshot {} of volume {} on pool {}: {}'.format(name, volume, pool, e), state='e')
-        return False
-
 # Primary command function
+# This command pipe is only used for OSD adds and removes
 def run_command(zk_conn, logger, this_node, data, d_osd):
     # Get the command and args
     command, args = data.split()
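Note: the deletion below strips run_command down to its osd_add/osd_remove branches, matching the new header comment above. Every removed branch followed one template: parse args, gate on either OSD ownership or this_node.router_state == 'primary', take the /cmd/ceph write lock, run the matching daemon function, then publish the outcome and hold the lock for a second so the waiting client sees it. The publish step, extracted as a hypothetical helper for illustration (report_result is not a function in the commit):

    def report_result(zk_conn, data, result):
        # Publish 'success-<data>' or 'failure-<data>' on the pipe, then hold the
        # lock briefly so the client waiting on its read lock sees the result
        status = 'success' if result else 'failure'
        zkhandler.writedata(zk_conn, {'/cmd/ceph': '{}-{}'.format(status, data)})
        time.sleep(1)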
@@ -804,301 +415,3 @@ def run_command(zk_conn, logger, this_node, data, d_osd):
                     zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
                 # Wait 1 seconds before we free the lock, to ensure the client hits the lock
                 time.sleep(1)
-
-    # Online an OSD
-    elif command == 'osd_in':
-        osd_id = args
-
-        # Verify osd_id is in the list
-        if d_osd[osd_id] and d_osd[osd_id].node == this_node.name:
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Online the OSD
-                result = in_osd(zk_conn, logger, osd_id)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Offline an OSD
-    elif command == 'osd_out':
-        osd_id = args
-
-        # Verify osd_id is in the list
-        if d_osd[osd_id] and d_osd[osd_id].node == this_node.name:
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Offline the OSD
-                result = out_osd(zk_conn, logger, osd_id)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Set a property
-    elif command == 'osd_set':
-        option = args
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Set the property
-                result = set_property(zk_conn, logger, option)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Unset a property
-    elif command == 'osd_unset':
-        option = args
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Unset the property
-                result = unset_property(zk_conn, logger, option)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Adding a new pool
-    elif command == 'pool_add':
-        name, pgs, copies, mincopies = args.split(',')
-        copies = copies.replace('copies=','')
-        mincopies = mincopies.replace('mincopies=','')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Add the pool
-                result = add_pool(zk_conn, logger, name, pgs, copies, mincopies)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Removing a pool
-    elif command == 'pool_remove':
-        name = args
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Remove the pool
-                result = remove_pool(zk_conn, logger, name)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Adding a new volume
-    elif command == 'volume_add':
-        pool, name, size = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Add the volume
-                result = add_volume(zk_conn, logger, pool, name, size)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Cloning a volume
-    elif command == 'volume_clone':
-        pool, name_orig, name_new, prefix = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Clone the volume
-                result = clone_volume(zk_conn, logger, pool, name_orig, name_new, prefix)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Resizing a volume
-    elif command == 'volume_resize':
-        pool, name, size = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Add the volume
-                result = resize_volume(zk_conn, logger, pool, name, size)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Renaming a new volume
-    elif command == 'volume_rename':
-        pool, name, new_name = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Add the volume
-                result = rename_volume(zk_conn, logger, pool, name, new_name)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Removing a volume
-    elif command == 'volume_remove':
-        pool, name = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Remove the volume
-                result = remove_volume(zk_conn, logger, pool, name)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Adding a new snapshot
-    elif command == 'snapshot_add':
-        pool, volume, name = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Add the snapshot
-                result = add_snapshot(zk_conn, logger, pool, volume, name)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Renaming a snapshot
-    elif command == 'snapshot_rename':
-        pool, volume, name, new_name = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Add the snapshot
-                result = rename_snapshot(zk_conn, logger, pool, volume, name, new_name)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)
-
-    # Removing a snapshot
-    elif command == 'snapshot_remove':
-        pool, volume, name = args.split(',')
-
-        if this_node.router_state == 'primary':
-            # Lock the command queue
-            zk_lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-            with zk_lock:
-                # Remove the snapshot
-                result = remove_snapshot(zk_conn, logger, pool, volume, name)
-                # Command succeeded
-                if result:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'success-{}'.format(data)})
-                # Command failed
-                else:
-                    # Update the command queue
-                    zkhandler.writedata(zk_conn, {'/cmd/ceph': 'failure-{}'.format(data)})
-                # Wait 1 seconds before we free the lock, to ensure the client hits the lock
-                time.sleep(1)