Add pool PGs count modification
Allows an administrator to adjust the PG count of a given pool. This can be used to increase the PGs (for example, after adding more OSDs) or to decrease them (to remove OSDs, reduce CPU load, etc.).
This commit is contained in:
parent
a0fccf83f7
commit
b6d689b769
|
@ -4459,6 +4459,50 @@ class API_Storage_Ceph_Pool_Element(Resource):
|
|||
reqargs.get("tier", None),
|
||||
)
|
||||
|
||||
    # Expect exactly one required query argument: the new PG count for the pool.
    @RequestParser(
        [
            {
                "name": "pgs",
                "required": True,
                "helptext": "A placement group count must be specified.",
            },
        ]
    )
    @Authenticator
    def put(self, pool, reqargs):
        """
        Adjust Ceph pool {pool}'s placement group count
        ---
        tags:
          - storage / ceph
        parameters:
          - in: query
            name: pgs
            type: integer
            required: true
            description: The new number of placement groups (PGs) for the pool
        responses:
          200:
            description: OK
            schema:
              type: object
              id: Message
          404:
            description: Not found
            schema:
              type: object
              id: Message
          400:
            description: Bad request
            schema:
              type: object
              id: Message
        """
        # Delegate to the API helper, which validates the PG count and
        # returns a (message-dict, HTTP status code) tuple for Flask-RESTful.
        # The default of 0 is unreachable in practice since "pgs" is required.
        return api_helper.ceph_pool_set_pgs(
            pool,
            reqargs.get("pgs", 0),
        )
|
||||
|
||||
@RequestParser(
|
||||
[
|
||||
{
|
||||
|
|
|
@ -1434,6 +1434,22 @@ def ceph_pool_remove(zkhandler, name):
|
|||
return output, retcode
|
||||
|
||||
|
||||
@ZKConnection(config)
def ceph_pool_set_pgs(zkhandler, name, pgs):
    """
    Set the placement group (PG) count of the Ceph RBD pool {name} to {pgs}.

    Returns a (message-dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    # Perform the change via the daemon-common library, then map the boolean
    # result onto an HTTP status code.
    retflag, retdata = pvc_ceph.set_pgs_pool(zkhandler, name, pgs)

    retcode = 200 if retflag else 400

    # Swap double quotes for single quotes so the message embeds cleanly in JSON.
    return {"message": retdata.replace('"', "'")}, retcode
|
||||
|
||||
|
||||
@pvc_common.Profiler(config)
|
||||
@ZKConnection(config)
|
||||
def ceph_volume_list(zkhandler, pool=None, limit=None, is_fuzzy=True):
|
||||
|
|
|
@ -708,7 +708,7 @@ def ceph_pool_info(config, pool):
|
|||
|
||||
def ceph_pool_list(config, limit):
|
||||
"""
|
||||
Get list information about Ceph OSDs (limited by {limit})
|
||||
Get list information about Ceph pools (limited by {limit})
|
||||
|
||||
API endpoint: GET /api/v1/storage/ceph/pool
|
||||
API arguments: limit={limit}
|
||||
|
@ -728,7 +728,7 @@ def ceph_pool_list(config, limit):
|
|||
|
||||
def ceph_pool_add(config, pool, pgs, replcfg, tier):
|
||||
"""
|
||||
Add new Ceph OSD
|
||||
Add new Ceph pool
|
||||
|
||||
API endpoint: POST /api/v1/storage/ceph/pool
|
||||
API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}, tier={tier}
|
||||
|
@ -747,7 +747,7 @@ def ceph_pool_add(config, pool, pgs, replcfg, tier):
|
|||
|
||||
def ceph_pool_remove(config, pool):
|
||||
"""
|
||||
Remove Ceph OSD
|
||||
Remove Ceph pool
|
||||
|
||||
API endpoint: DELETE /api/v1/storage/ceph/pool/{pool}
|
||||
API arguments:
|
||||
|
@ -766,6 +766,27 @@ def ceph_pool_remove(config, pool):
|
|||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def ceph_pool_set_pgs(config, pool, pgs):
    """
    Set the PGs of a Ceph pool

    API endpoint: PUT /api/v1/storage/ceph/pool/{pool}
    API arguments: {"pgs": "{pgs}"}
    API schema: {"message":"{data}"}
    """
    response = call_api(
        config,
        "put",
        "/storage/ceph/pool/{pool}".format(pool=pool),
        params={"pgs": pgs},
    )

    # A 200 response means the daemon accepted and applied the new PG count.
    retstatus = response.status_code == 200

    return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_list_pool(pool_list):
|
||||
# Handle empty list
|
||||
if not pool_list:
|
||||
|
|
|
@ -3532,6 +3532,8 @@ def ceph_pool():
|
|||
def ceph_pool_add(name, pgs, tier, replcfg):
|
||||
"""
|
||||
Add a new Ceph RBD pool with name NAME and PGS placement groups.
|
||||
|
||||
The placement group count must be a non-zero power of 2.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc_ceph.ceph_pool_add(config, name, pgs, replcfg, tier)
|
||||
|
@ -3570,6 +3572,26 @@ def ceph_pool_remove(name, confirm_flag):
|
|||
cleanup(retcode, retmsg)
|
||||
|
||||
|
||||
###############################################################################
# pvc storage pool set-pgs
###############################################################################
@click.command(name="set-pgs", short_help="Set PGs of an RBD pool.")
@click.argument("name")
@click.argument("pgs")
@cluster_req
def ceph_pool_set_pgs(name, pgs):
    """
    Set the placement groups (PGs) count for the pool NAME to PGS.

    The placement group count must be a non-zero power of 2.

    Placement group counts may be increased or decreased as required though frequent alteration is not recommended.
    """
    # NOTE(review): PGS arrives as a string (no type=int on the click argument);
    # conversion and power-of-2 validation happen server-side — confirm that is
    # intentional rather than adding client-side validation here.
    retcode, retmsg = pvc_ceph.ceph_pool_set_pgs(config, name, pgs)
    cleanup(retcode, retmsg)
|
||||
|
||||
|
||||
###############################################################################
|
||||
# pvc storage pool list
|
||||
###############################################################################
|
||||
|
@ -5844,6 +5866,7 @@ ceph_osd.add_command(ceph_osd_list)
|
|||
|
||||
ceph_pool.add_command(ceph_pool_add)
|
||||
ceph_pool.add_command(ceph_pool_remove)
|
||||
ceph_pool.add_command(ceph_pool_set_pgs)
|
||||
ceph_pool.add_command(ceph_pool_list)
|
||||
|
||||
ceph_volume.add_command(ceph_volume_add)
|
||||
|
|
|
@ -513,6 +513,47 @@ def remove_pool(zkhandler, name):
|
|||
return True, 'Removed RBD pool "{}" and all volumes.'.format(name)
|
||||
|
||||
|
||||
def set_pgs_pool(zkhandler, name, pgs):
    """
    Set the placement group (PG) count of the RBD pool {name} to {pgs}.

    The new count must be a non-zero power of 2. Applies pg_num via the Ceph
    CLI, bumps pgp_num when the count is being increased (required for the new
    PGs to actually be placed and data to rebalance), and records the new
    count in Zookeeper.

    Returns a (success-bool, message-string) tuple.
    """
    if not verifyPool(zkhandler, name):
        return False, f'ERROR: No pool with name "{name}" is present in the cluster.'

    # Validate new PGs count: must be a non-zero power of 2
    pgs = int(pgs)
    if (pgs == 0) or (pgs & (pgs - 1) != 0):
        return (
            False,
            f'ERROR: Invalid PGs number "{pgs}": must be a non-zero power of 2.',
        )

    # Set the new pgs number
    retcode, stdout, stderr = common.run_os_command(
        f"ceph osd pool set {name} pg_num {pgs}"
    )
    if retcode:
        return False, f"ERROR: Failed to set pg_num on pool {name} to {pgs}: {stderr}"

    # Set the new pgps number if increasing. Bugfix: the previous condition
    # (current_pgs >= pgs) only fired on a decrease, contradicting its stated
    # intent — on an increase, pgp_num must follow pg_num or the newly split
    # PGs are never re-placed and no rebalancing occurs.
    current_pgs = int(zkhandler.read(("pool.pgs", name)))
    if pgs > current_pgs:
        retcode, stdout, stderr = common.run_os_command(
            f"ceph osd pool set {name} pgp_num {pgs}"
        )
        if retcode:
            # Bugfix: this failure concerns pgp_num, not pg_num.
            return (
                False,
                f"ERROR: Failed to set pgp_num on pool {name} to {pgs}: {stderr}",
            )

    # Update Zookeeper count
    zkhandler.write(
        [
            (("pool.pgs", name), pgs),
        ]
    )

    return True, f'Set PGs count to {pgs} for RBD pool "{name}".'
|
||||
|
||||
|
||||
def get_list_pool(zkhandler, limit, is_fuzzy=True):
|
||||
full_pool_list = zkhandler.children("base.pool")
|
||||
|
||||
|
|
|
@ -5414,6 +5414,42 @@
|
|||
"tags": [
|
||||
"storage / ceph"
|
||||
]
|
||||
},
|
||||
"put": {
|
||||
"description": "",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "The new number of placement groups (PGs) for the pool",
|
||||
"in": "query",
|
||||
"name": "pgs",
|
||||
"required": true,
|
||||
"type": "integer"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Message"
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"description": "Bad request",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Message"
|
||||
}
|
||||
},
|
||||
"404": {
|
||||
"description": "Not found",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/Message"
|
||||
}
|
||||
}
|
||||
},
|
||||
"summary": "Adjust Ceph pool {pool}'s placement group count",
|
||||
"tags": [
|
||||
"storage / ceph"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/storage/ceph/snapshot": {
|
||||
|
|
Loading…
Reference in New Issue