Move config parser into daemon_lib
Also reformat and add configuration values for the API daemon.
commit 03a738f878 (parent 4df5fdbca6)
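All three daemons (pvcapid, pvchealthd, pvcnoded) now obtain their runtime
configuration from the shared daemon_lib parser instead of per-daemon copies.
A minimal sketch of the common bootstrap pattern the hunks below converge on
(the version string is this tree's; "pvcapid" stands in for any of the three
daemon names):

import daemon_lib.config as cfg

# Parse the configuration file once at startup, then tag on the
# daemon's own identity.
config = cfg.get_configuration()
config["daemon_name"] = "pvcapid"
config["daemon_version"] = "0.9.82"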
@@ -19,13 +19,13 @@
 #
 ###############################################################################
 
-import os
-import yaml
-
 from ssl import SSLContext, TLSVersion
 
 from distutils.util import strtobool as dustrtobool
 
+import daemon_lib.config as cfg
 
 # Daemon version
 version = "0.9.82"
@@ -53,160 +53,13 @@ def strtobool(stringv):
 # Configuration Parsing
 ##########################################################
 
-# Parse the configuration file
-config_file = None
-try:
-    _config_file = "/etc/pvc/pvcapid.yaml"
-    if not os.path.exists(_config_file):
-        raise
-    config_file = _config_file
-    config_type = "legacy"
-except Exception:
-    pass
-try:
-    _config_file = os.environ["PVC_CONFIG_FILE"]
-    if not os.path.exists(_config_file):
-        raise
-    config_file = _config_file
-    config_type = "current"
-except Exception:
-    pass
-
-if not config_file:
-    print(
-        'Error: The "PVC_CONFIG_FILE" environment variable must be set before starting pvcapid.'
-    )
-    exit(1)
+# Get our configuration
+config = cfg.get_configuration()
+config["daemon_name"] = "pvcapid"
+config["daemon_version"] = version
 
-
-def load_configuration_file(config_file):
-    print('Loading configuration from file "{}"'.format(config_file))
-
-    # Read in the config
-    try:
-        with open(config_file, "r") as cfgfile:
-            o_config = yaml.load(cfgfile, Loader=yaml.BaseLoader)
-    except Exception as e:
-        print("ERROR: Failed to parse configuration file: {}".format(e))
-        exit(1)
-
-    return o_config
-
-
-def get_configuration_current(config_file):
-    o_config = load_configuration_file(config_file)
-    try:
-        # Create the config object
-        config = {
-            "debug": strtobool(o_config["logging"].get("debug_logging", "False")),
-            "all_nodes": o_config["cluster"]["all_nodes"],
-            "coordinators": o_config["cluster"]["coordinator_nodes"],
-            "listen_address": o_config["api"]["listen"]["address"],
-            "listen_port": int(o_config["api"]["listen"]["port"]),
-            "auth_enabled": strtobool(
-                o_config["api"]["authentication"].get("enabled", "False")
-            ),
-            "auth_secret_key": o_config["api"]["authentication"]["secret_key"],
-            "auth_source": o_config["api"]["authentication"]["source"],
-            "ssl_enabled": strtobool(o_config["api"]["ssl"].get("enabled", "False")),
-            "ssl_cert_file": o_config["api"]["ssl"]["certificate"],
-            "ssl_key_file": o_config["api"]["ssl"]["private_key"],
-            "database_port": o_config["database"]["postgres"]["port"],
-            "database_host": o_config["database"]["postgres"]["hostname"],
-            "database_name": o_config["database"]["postgres"]["credentials"]["api"][
-                "database"
-            ],
-            "database_user": o_config["database"]["postgres"]["credentials"]["api"][
-                "username"
-            ],
-            "database_password": o_config["database"]["postgres"]["credentials"]["api"][
-                "password"
-            ],
-            "queue_port": o_config["database"]["keydb"]["port"],
-            "queue_host": o_config["database"]["keydb"]["hostname"],
-            "queue_path": o_config["database"]["keydb"]["path"],
-            "storage_domain": o_config["cluster"]["networks"]["storage"]["domain"],
-            "storage_hosts": o_config["ceph"].get("monitor_hosts", None),
-            "ceph_monitor_port": o_config["ceph"]["monitor_port"],
-            "ceph_storage_secret_uuid": o_config["ceph"]["secret_uuid"],
-        }
-
-        # Use coordinators as storage hosts if not explicitly specified
-        if not config["storage_hosts"] or len(config["storage_hosts"]) < 1:
-            config["storage_hosts"] = config["coordinators"]
-
-        # Set up our token list if specified
-        if config["auth_source"] == "token":
-            config["auth_tokens"] = o_config["api"]["token"]
-        else:
-            if config["auth_enabled"]:
-                print(
-                    "WARNING: No authentication method provided; disabling authentication."
-                )
-                config["auth_enabled"] = False
-
-    except Exception as e:
-        print(f"ERROR: Failed to load configuration: {e}")
-        exit(1)
-
-    return config
-
-
-def get_configuration_legacy(config_file):
-    o_config = load_configuration_file(config_file)
-    try:
-        # Create the config object
-        config = {
-            "debug": strtobool(o_config["pvc"]["debug"]),
-            "coordinators": o_config["pvc"]["coordinators"],
-            "listen_address": o_config["pvc"]["api"]["listen_address"],
-            "listen_port": int(o_config["pvc"]["api"]["listen_port"]),
-            "auth_enabled": strtobool(
-                o_config["pvc"]["api"]["authentication"]["enabled"]
-            ),
-            "auth_secret_key": o_config["pvc"]["api"]["authentication"]["secret_key"],
-            "auth_tokens": o_config["pvc"]["api"]["authentication"]["tokens"],
-            "ssl_enabled": strtobool(o_config["pvc"]["api"]["ssl"]["enabled"]),
-            "ssl_key_file": o_config["pvc"]["api"]["ssl"]["key_file"],
-            "ssl_cert_file": o_config["pvc"]["api"]["ssl"]["cert_file"],
-            "database_host": o_config["pvc"]["provisioner"]["database"]["host"],
-            "database_port": int(o_config["pvc"]["provisioner"]["database"]["port"]),
-            "database_name": o_config["pvc"]["provisioner"]["database"]["name"],
-            "database_user": o_config["pvc"]["provisioner"]["database"]["user"],
-            "database_password": o_config["pvc"]["provisioner"]["database"]["pass"],
-            "queue_host": o_config["pvc"]["provisioner"]["queue"]["host"],
-            "queue_port": o_config["pvc"]["provisioner"]["queue"]["port"],
-            "queue_path": o_config["pvc"]["provisioner"]["queue"]["path"],
-            "storage_hosts": o_config["pvc"]["provisioner"]["ceph_cluster"][
-                "storage_hosts"
-            ],
-            "storage_domain": o_config["pvc"]["provisioner"]["ceph_cluster"][
-                "storage_domain"
-            ],
-            "ceph_monitor_port": o_config["pvc"]["provisioner"]["ceph_cluster"][
-                "ceph_monitor_port"
-            ],
-            "ceph_storage_secret_uuid": o_config["pvc"]["provisioner"]["ceph_cluster"][
-                "ceph_storage_secret_uuid"
-            ],
-        }
-
-        # Use coordinators as storage hosts if not explicitly specified
-        if not config["storage_hosts"]:
-            config["storage_hosts"] = config["coordinators"]
-
-    except Exception as e:
-        print("ERROR: Failed to load configuration: {}".format(e))
-        exit(1)
-
-    return config
-
-
-if config_type == "legacy":
-    config = get_configuration_legacy(config_file)
-else:
-    config = get_configuration_current(config_file)
 
 ##########################################################
 # Entrypoint
 ##########################################################
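The flat pvcapid-local keys give way to the prefixed keys that daemon_lib
produces. A sketch of the rename map implied by the hunks above and below (an
editorial summary, not code from the commit):

# Old pvcapid config key -> new daemon_lib config key
KEY_RENAMES = {
    "listen_address": "api_listen_address",
    "listen_port": "api_listen_port",
    "auth_enabled": "api_auth_enabled",
    "auth_secret_key": "api_auth_secret_key",
    "auth_source": "api_auth_source",
    "auth_tokens": "api_auth_tokens",
    "ssl_enabled": "api_ssl_enabled",
    "ssl_cert_file": "api_ssl_cert_file",
    "ssl_key_file": "api_ssl_key_file",
    "database_host": "api_postgresql_host",
    "database_port": "api_postgresql_port",
    "database_name": "api_postgresql_dbname",
    "database_user": "api_postgresql_user",
    "database_password": "api_postgresql_password",
    "queue_host": "keydb_host",
    "queue_port": "keydb_port",
    "queue_path": "keydb_path",
}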
@@ -215,11 +68,13 @@ else:
 def entrypoint():
     import pvcapid.flaskapi as pvc_api  # noqa: E402
 
-    if config["ssl_enabled"]:
+    if config["api_ssl_enabled"]:
         context = SSLContext()
         context.minimum_version = TLSVersion.TLSv1
         context.get_ca_certs()
-        context.load_cert_chain(config["ssl_cert_file"], keyfile=config["ssl_key_file"])
+        context.load_cert_chain(
+            config["api_ssl_cert_file"], keyfile=config["api_ssl_key_file"]
+        )
     else:
         context = None
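The TLS setup itself is unchanged apart from the key names. A self-contained
sketch of the equivalent context construction (the two paths are placeholders,
not values from this repository):

from ssl import SSLContext, TLSVersion

# Build the server context, floor it at TLSv1, and load the configured
# certificate chain; entrypoint() later hands this to Flask via
# app.run(..., ssl_context=context).
context = SSLContext()
context.minimum_version = TLSVersion.TLSv1
context.load_cert_chain(
    "/etc/ssl/pvc/cert.pem",  # placeholder for api_ssl_cert_file
    keyfile="/etc/ssl/pvc/key.pem",  # placeholder for api_ssl_key_file
)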
@@ -238,18 +93,18 @@ def entrypoint():
     print("| API version: v{0: <46} |".format(API_VERSION))
     print(
         "| Listen: {0: <52} |".format(
-            "{}:{}".format(config["listen_address"], config["listen_port"])
+            "{}:{}".format(config["api_listen_address"], config["api_listen_port"])
         )
     )
-    print("| SSL: {0: <55} |".format(str(config["ssl_enabled"])))
-    print("| Authentication: {0: <44} |".format(str(config["auth_enabled"])))
+    print("| SSL: {0: <55} |".format(str(config["api_ssl_enabled"])))
+    print("| Authentication: {0: <44} |".format(str(config["api_auth_enabled"])))
     print("|--------------------------------------------------------------|")
     print("")
 
     pvc_api.celery_startup()
     pvc_api.app.run(
-        config["listen_address"],
-        config["listen_port"],
+        config["api_listen_address"],
+        config["api_listen_port"],
         threaded=True,
         ssl_context=context,
     )
@@ -135,11 +135,11 @@ def cleanup(job_name, db_conn=None, db_cur=None, zkhandler=None):
 # Database connections
 def open_database(config):
     conn = psycopg2.connect(
-        host=config["database_host"],
-        port=config["database_port"],
-        dbname=config["database_name"],
-        user=config["database_user"],
-        password=config["database_password"],
+        host=config["api_postgresql_host"],
+        port=config["api_postgresql_port"],
+        dbname=config["api_postgresql_dbname"],
+        user=config["api_postgresql_user"],
+        password=config["api_postgresql_password"],
     )
     cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     return conn, cur
@@ -61,11 +61,11 @@ app = flask.Flask(__name__)
 # Set up SQLAlchemy backend
 app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
 app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://{}:{}@{}:{}/{}".format(
-    config["database_user"],
-    config["database_password"],
-    config["database_host"],
-    config["database_port"],
-    config["database_name"],
+    config["api_postgresql_user"],
+    config["api_postgresql_password"],
+    config["api_postgresql_host"],
+    config["api_postgresql_port"],
+    config["api_postgresql_dbname"],
 )
 
 if config["debug"]:
@@ -73,8 +73,8 @@ if config["debug"]:
 else:
     app.config["DEBUG"] = False
 
-if config["auth_enabled"]:
-    app.config["SECRET_KEY"] = config["auth_secret_key"]
+if config["api_auth_enabled"]:
+    app.config["SECRET_KEY"] = config["api_auth_secret_key"]
 
 # Create SQLAlchemy database
 db = SQLAlchemy(app)
@@ -133,7 +133,7 @@ def run_celery_task(task_def, **kwargs):
 
 # Create celery definition
 celery_task_uri = "redis://{}:{}{}".format(
-    config["queue_host"], config["queue_port"], config["queue_path"]
+    config["keydb_host"], config["keydb_port"], config["keydb_path"]
 )
 celery = Celery(
     app.name,
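The queue_* keys become keydb_* but feed the same Redis-protocol broker URI.
With hypothetical values the construction resolves as follows (note that
keydb_path carries its own leading slash, hence no separator in the format
string):

# Hypothetical host/port/path, mirroring the format call in the hunk above.
celery_task_uri = "redis://{}:{}{}".format("127.0.0.1", 6379, "/0")
assert celery_task_uri == "redis://127.0.0.1:6379/0"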
@@ -199,7 +199,7 @@ def Authenticator(function):
     @wraps(function)
     def authenticate(*args, **kwargs):
         # No authentication required
-        if not config["auth_enabled"]:
+        if not config["api_auth_enabled"]:
             return function(*args, **kwargs)
         # Session-based authentication
         if "token" in flask.session:
@@ -208,7 +208,7 @@ def Authenticator(function):
         if "X-Api-Key" in flask.request.headers:
             if any(
                 token
-                for token in config["auth_tokens"]
+                for token in config["api_auth_tokens"]
                 if flask.request.headers.get("X-Api-Key") == token.get("token")
             ):
                 return function(*args, **kwargs)
@@ -469,12 +469,12 @@ class API_Login(Resource):
               type: object
               id: Message
         """
-        if not config["auth_enabled"]:
+        if not config["api_auth_enabled"]:
             return flask.redirect(Api.url_for(api, API_Root))
 
         if any(
             token
-            for token in config["auth_tokens"]
+            for token in config["api_auth_tokens"]
             if flask.request.values["token"] in token["token"]
         ):
             flask.session["token"] = flask.request.form["token"]
@@ -503,7 +503,7 @@ class API_Logout(Resource):
             302:
               description: Authentication disabled
         """
-        if not config["auth_enabled"]:
+        if not config["api_auth_enabled"]:
             return flask.redirect(Api.url_for(api, API_Root))
 
         flask.session.pop("token", None)
@@ -48,11 +48,11 @@ import pvcapid.provisioner as provisioner
 # Database connections
 def open_database(config):
     conn = psycopg2.connect(
-        host=config["database_host"],
-        port=config["database_port"],
-        dbname=config["database_name"],
-        user=config["database_user"],
-        password=config["database_password"],
+        host=config["api_postgresql_host"],
+        port=config["api_postgresql_port"],
+        dbname=config["api_postgresql_dbname"],
+        user=config["api_postgresql_user"],
+        password=config["api_postgresql_password"],
     )
     cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     return conn, cur
@@ -63,11 +63,11 @@ class ProvisioningError(Exception):
 # Database connections
 def open_database(config):
     conn = psycopg2.connect(
-        host=config["database_host"],
-        port=config["database_port"],
-        dbname=config["database_name"],
-        user=config["database_user"],
-        password=config["database_password"],
+        host=config["api_postgresql_host"],
+        port=config["api_postgresql_port"],
+        dbname=config["api_postgresql_dbname"],
+        user=config["api_postgresql_user"],
+        password=config["api_postgresql_password"],
     )
     cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     return conn, cur
@@ -167,11 +167,11 @@ def chroot(destination):
 def open_db(config):
     try:
         conn = psycopg2.connect(
-            host=config["database_host"],
-            port=config["database_port"],
-            dbname=config["database_name"],
-            user=config["database_user"],
-            password=config["database_password"],
+            host=config["api_postgresql_host"],
+            port=config["api_postgresql_port"],
+            dbname=config["api_postgresql_dbname"],
+            user=config["api_postgresql_user"],
+            password=config["api_postgresql_password"],
         )
         cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     except Exception:
@@ -326,9 +326,9 @@ def create_vm(
         vm_data["system_architecture"] = stdout.strip()
 
     monitor_list = list()
-    coordinator_names = config["storage_hosts"]
-    for coordinator in coordinator_names:
-        monitor_list.append("{}.{}".format(coordinator, config["storage_domain"]))
+    monitor_names = config["storage_hosts"]
+    for monitor in monitor_names:
+        monitor_list.append("{}.{}".format(monitor, config["storage_domain"]))
     vm_data["ceph_monitor_list"] = monitor_list
     vm_data["ceph_monitor_port"] = config["ceph_monitor_port"]
     vm_data["ceph_monitor_secret"] = config["ceph_storage_secret_uuid"]
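The coordinator -> monitor rename reflects that storage_hosts may now be a
dedicated Ceph monitor list rather than the coordinator list. With
hypothetical values the loop builds:

# Hypothetical hosts and domain, mirroring the loop in the hunk above.
storage_hosts = ["pvchv1", "pvchv2", "pvchv3"]
storage_domain = "storage.local"
monitor_list = ["{}.{}".format(monitor, storage_domain) for monitor in storage_hosts]
assert monitor_list == [
    "pvchv1.storage.local",
    "pvchv2.storage.local",
    "pvchv3.storage.local",
]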
@@ -271,17 +271,17 @@ def get_configuration_current(config_file):
             "keydb_port": o_database["keydb"]["port"],
             "keydb_host": o_database["keydb"]["hostname"],
             "keydb_path": o_database["keydb"]["path"],
-            "metadata_postgresql_port": o_database["postgres"]["port"],
-            "metadata_postgresql_host": o_database["postgres"]["hostname"],
-            "metadata_postgresql_dbname": o_database["postgres"]["credentials"]["api"][
+            "api_postgresql_port": o_database["postgres"]["port"],
+            "api_postgresql_host": o_database["postgres"]["hostname"],
+            "api_postgresql_dbname": o_database["postgres"]["credentials"]["api"][
                 "database"
             ],
-            "metadata_postgresql_user": o_database["postgres"]["credentials"]["api"][
+            "api_postgresql_user": o_database["postgres"]["credentials"]["api"][
                 "username"
             ],
-            "metadata_postgresql_password": o_database["postgres"]["credentials"][
-                "api"
-            ]["password"],
+            "api_postgresql_password": o_database["postgres"]["credentials"]["api"][
+                "password"
+            ],
             "pdns_postgresql_port": o_database["postgres"]["port"],
             "pdns_postgresql_host": o_database["postgres"]["hostname"],
             "pdns_postgresql_dbname": o_database["postgres"]["credentials"]["dns"][
@@ -335,9 +335,7 @@ def get_configuration_current(config_file):
             "log_keepalive_cluster_details": o_logging.get(
                 "log_cluster_details", False
             ),
-            "log_keepalive_plugin_details": o_logging.get(
-                "log_monitoring_details", False
-            ),
+            "log_monitoring_details": o_logging.get("log_monitoring_details", False),
             "console_log_lines": o_logging.get("console_log_lines", False),
             "node_log_lines": o_logging.get("node_log_lines", False),
         }
@@ -362,9 +360,49 @@ def get_configuration_current(config_file):
             + o_ceph["ceph_keyring_file"],
             "ceph_monitor_port": o_ceph["monitor_port"],
             "ceph_secret_uuid": o_ceph["secret_uuid"],
+            "storage_hosts": o_ceph.get("monitor_hosts", None),
         }
         config = {**config, **config_ceph}
 
+        o_api = o_config["api"]
+
+        o_api_listen = o_api["listen"]
+        config_api_listen = {
+            "api_listen_address": o_api_listen["address"],
+            "api_listen_port": o_api_listen["port"],
+        }
+        config = {**config, **config_api_listen}
+
+        o_api_authentication = o_api["authentication"]
+        config_api_authentication = {
+            "api_auth_enabled": o_api_authentication.get("enabled", False),
+            "api_auth_secret_key": o_api_authentication.get("secret_key", ""),
+            "api_auth_source": o_api_authentication.get("source", "token"),
+        }
+        config = {**config, **config_api_authentication}
+
+        o_api_ssl = o_api["ssl"]
+        config_api_ssl = {
+            "api_ssl_enabled": o_api_ssl.get("enabled", False),
+            "api_ssl_cert_file": o_api_ssl.get("certificate", None),
+            "api_ssl_key_file": o_api_ssl.get("private_key", None),
+        }
+        config = {**config, **config_api_ssl}
+
+        # Use coordinators as storage hosts if not explicitly specified
+        if not config["storage_hosts"] or len(config["storage_hosts"]) < 1:
+            config["storage_hosts"] = config["coordinators"]
+
+        # Set up our token list if specified
+        if config["api_auth_source"] == "token":
+            config["api_auth_tokens"] = o_api["token"]
+        else:
+            if config["api_auth_enabled"]:
+                print(
+                    "WARNING: No authentication method provided; disabling API authentication."
+                )
+                config["api_auth_enabled"] = False
+
         # Add our node static data to the config
         config["static_data"] = get_static_data()
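These new blocks flatten the config file's api: section into prefixed keys,
in the same style as the existing database and ceph blocks. A sketch of the
mapping using a hypothetical parsed o_config (all values illustrative only):

o_config = {
    "api": {
        "listen": {"address": "10.0.0.1", "port": 7370},
        "authentication": {
            "enabled": True,
            "secret_key": "changeme",
            "source": "token",
        },
        "ssl": {"enabled": False, "certificate": None, "private_key": None},
        "token": [{"description": "admin", "token": "changeme"}],
    }
}

o_api = o_config["api"]
config = {
    "api_listen_address": o_api["listen"]["address"],
    "api_listen_port": o_api["listen"]["port"],
    "api_auth_enabled": o_api["authentication"].get("enabled", False),
    "api_auth_secret_key": o_api["authentication"].get("secret_key", ""),
    "api_auth_source": o_api["authentication"].get("source", "token"),
    "api_ssl_enabled": o_api["ssl"].get("enabled", False),
    "api_ssl_cert_file": o_api["ssl"].get("certificate", None),
    "api_ssl_key_file": o_api["ssl"].get("private_key", None),
}
# Token-based auth keeps its token list; any other source disables auth.
if config["api_auth_source"] == "token":
    config["api_auth_tokens"] = o_api["token"]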
@@ -19,12 +19,12 @@
 #
 ###############################################################################
 
-import pvchealthd.util.config
 import pvchealthd.util.zookeeper
 
 import pvchealthd.objects.MonitoringInstance as MonitoringInstance
 import pvchealthd.objects.NodeInstance as NodeInstance
 
+import daemon_lib.config as cfg
 import daemon_lib.log as log
 
 from time import sleep
@@ -45,7 +45,7 @@ def entrypoint():
     monitoring_instance = None
 
     # Get our configuration
-    config = pvchealthd.util.config.get_configuration()
+    config = cfg.get_configuration()
     config["daemon_name"] = "pvchealthd"
     config["daemon_version"] = version
@@ -1,686 +0,0 @@
-#!/usr/bin/env python3
-
-# config.py - Utility functions for pvcnoded configuration parsing
-# Part of the Parallel Virtual Cluster (PVC) system
-#
-# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, version 3.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <https://www.gnu.org/licenses/>.
-#
-###############################################################################
-
-import daemon_lib.common as common
-
-import os
-import subprocess
-import yaml
-
-from socket import gethostname
-from re import findall
-from psutil import cpu_count
-from ipaddress import ip_address, ip_network
-from json import loads
-
-
-class MalformedConfigurationError(Exception):
-    """
-    An except when parsing the PVC Node daemon configuration file
-    """
-
-    def __init__(self, error=None):
-        self.msg = f"ERROR: Configuration file is malformed: {error}"
-
-    def __str__(self):
-        return str(self.msg)
-
-
-def get_static_data():
-    """
-    Data that is obtained once at node startup for use later
-    """
-    staticdata = list()
-    staticdata.append(str(cpu_count()))  # CPU count
-    staticdata.append(
-        subprocess.run(["uname", "-r"], stdout=subprocess.PIPE)
-        .stdout.decode("ascii")
-        .strip()
-    )
-    staticdata.append(
-        subprocess.run(["uname", "-o"], stdout=subprocess.PIPE)
-        .stdout.decode("ascii")
-        .strip()
-    )
-    staticdata.append(
-        subprocess.run(["uname", "-m"], stdout=subprocess.PIPE)
-        .stdout.decode("ascii")
-        .strip()
-    )
-
-    return staticdata
-
-
-def get_configuration_path():
-    config_file = None
-    try:
-        _config_file = "/etc/pvc/pvcnoded.yaml"
-        if not os.path.exists(_config_file):
-            raise
-        config_file = _config_file
-        config_type = "legacy"
-    except Exception:
-        pass
-    try:
-        _config_file = os.environ["PVC_CONFIG_FILE"]
-        if not os.path.exists(_config_file):
-            raise
-        config_file = _config_file
-        config_type = "current"
-    except Exception:
-        pass
-
-    if not config_file:
-        print('ERROR: The "PVC_CONFIG_FILE" environment variable must be set.')
-        os._exit(1)
-
-    return config_file, config_type
-
-
-def get_hostname():
-    node_fqdn = gethostname()
-    node_hostname = node_fqdn.split(".", 1)[0]
-    node_domain = "".join(node_fqdn.split(".", 1)[1:])
-    try:
-        node_id = findall(r"\d+", node_hostname)[-1]
-    except IndexError:
-        node_id = 0
-
-    return node_fqdn, node_hostname, node_domain, node_id
-
-
-def validate_floating_ip(config, network):
-    if network not in ["cluster", "storage", "upstream"]:
-        return False, f'Specified network type "{network}" is not valid'
-
-    floating_key = f"{network}_floating_ip"
-    network_key = f"{network}_network"
-
-    # Verify the network provided is valid
-    try:
-        network = ip_network(config[network_key])
-    except Exception:
-        return (
-            False,
-            f"Network address {config[network_key]} for {network_key} is not valid",
-        )
-
-    # Verify that the floating IP is valid (and in the network)
-    try:
-        floating_address = ip_address(config[floating_key].split("/")[0])
-        if floating_address not in list(network.hosts()):
-            raise
-    except Exception:
-        return (
-            False,
-            f"Floating address {config[floating_key]} for {floating_key} is not valid",
-        )
-
-    return True, ""
-
-
-def get_configuration_current(config_file):
-    print('Loading configuration from file "{}"'.format(config_file))
-
-    with open(config_file, "r") as cfgfh:
-        try:
-            o_config = yaml.load(cfgfh, Loader=yaml.SafeLoader)
-        except Exception as e:
-            print(f"ERROR: Failed to parse configuration file: {e}")
-            os._exit(1)
-
-    config = dict()
-
-    node_fqdn, node_hostname, node_domain, node_id = get_hostname()
-
-    config_thisnode = {
-        "node": node_hostname,
-        "node_hostname": node_hostname,
-        "node_fqdn": node_fqdn,
-        "node_domain": node_domain,
-        "node_id": node_id,
-    }
-    config = {**config, **config_thisnode}
-
-    try:
-        o_path = o_config["path"]
-        config_path = {
-            "plugin_directory": o_path.get(
-                "plugin_directory", "/usr/share/pvc/plugins"
-            ),
-            "dynamic_directory": o_path["dynamic_directory"],
-            "log_directory": o_path["system_log_directory"],
-            "console_log_directory": o_path["console_log_directory"],
-            "ceph_directory": o_path["ceph_directory"],
-        }
-        # Define our dynamic directory schema
-        config_path["dnsmasq_dynamic_directory"] = (
-            config_path["dynamic_directory"] + "/dnsmasq"
-        )
-        config_path["pdns_dynamic_directory"] = (
-            config_path["dynamic_directory"] + "/pdns"
-        )
-        config_path["nft_dynamic_directory"] = config_path["dynamic_directory"] + "/nft"
-        # Define our log directory schema
-        config_path["dnsmasq_log_directory"] = config_path["log_directory"] + "/dnsmasq"
-        config_path["pdns_log_directory"] = config_path["log_directory"] + "/pdns"
-        config_path["nft_log_directory"] = config_path["log_directory"] + "/nft"
-        config = {**config, **config_path}
-
-        o_subsystem = o_config["subsystem"]
-        config_subsystem = {
-            "enable_hypervisor": o_subsystem.get("enable_hypervisor", True),
-            "enable_networking": o_subsystem.get("enable_networking", True),
-            "enable_storage": o_subsystem.get("enable_storage", True),
-            "enable_worker": o_subsystem.get("enable_worker", True),
-            "enable_api": o_subsystem.get("enable_api", True),
-        }
-        config = {**config, **config_subsystem}
-
-        o_cluster = o_config["cluster"]
-        config_cluster = {
-            "cluster_name": o_cluster["name"],
-            "all_nodes": o_cluster["all_nodes"],
-            "coordinators": o_cluster["coordinator_nodes"],
-        }
-        config = {**config, **config_cluster}
-
-        o_cluster_networks = o_cluster["networks"]
-        for network_type in ["cluster", "storage", "upstream"]:
-            o_cluster_networks_specific = o_cluster_networks[network_type]
-            config_cluster_networks_specific = {
-                f"{network_type}_domain": o_cluster_networks_specific["domain"],
-                f"{network_type}_dev": o_cluster_networks_specific["device"],
-                f"{network_type}_mtu": o_cluster_networks_specific["mtu"],
-                f"{network_type}_network": o_cluster_networks_specific["ipv4"][
-                    "network_address"
-                ]
-                + "/"
-                + str(o_cluster_networks_specific["ipv4"]["netmask"]),
-                f"{network_type}_floating_ip": o_cluster_networks_specific["ipv4"][
-                    "floating_address"
-                ]
-                + "/"
-                + str(o_cluster_networks_specific["ipv4"]["netmask"]),
-                f"{network_type}_node_ip_selection": o_cluster_networks_specific[
-                    "node_ip_selection"
-                ],
-            }
-
-            if (
-                o_cluster_networks_specific["ipv4"].get("gateway_address", None)
-                is not None
-            ):
-                config[f"{network_type}_gateway"] = o_cluster_networks_specific["ipv4"][
-                    "gateway_address"
-                ]
-
-            result, msg = validate_floating_ip(
-                config_cluster_networks_specific, network_type
-            )
-            if not result:
-                raise MalformedConfigurationError(msg)
-
-            network = ip_network(
-                config_cluster_networks_specific[f"{network_type}_network"]
-            )
-
-            if (
-                config_cluster_networks_specific[f"{network_type}_node_ip_selection"]
-                == "by-id"
-            ):
-                address_id = int(node_id) - 1
-            else:
-                # This roundabout solution ensures the given IP is in the subnet and is something valid
-                address_id = [
-                    idx
-                    for idx, ip in enumerate(list(network.hosts()))
-                    if str(ip)
-                    == config_cluster_networks_specific[
-                        f"{network_type}_node_ip_selection"
-                    ]
-                ][0]
-
-            config_cluster_networks_specific[
-                f"{network_type}_dev_ip"
-            ] = f"{list(network.hosts())[address_id]}/{network.prefixlen}"
-
-            config = {**config, **config_cluster_networks_specific}
-
-        o_database = o_config["database"]
-        config_database = {
-            "zookeeper_port": o_database["zookeeper"]["port"],
-            "keydb_port": o_database["keydb"]["port"],
-            "keydb_host": o_database["keydb"]["hostname"],
-            "keydb_path": o_database["keydb"]["path"],
-            "metadata_postgresql_port": o_database["postgres"]["port"],
-            "metadata_postgresql_host": o_database["postgres"]["hostname"],
-            "metadata_postgresql_dbname": o_database["postgres"]["credentials"]["api"][
-                "database"
-            ],
-            "metadata_postgresql_user": o_database["postgres"]["credentials"]["api"][
-                "username"
-            ],
-            "metadata_postgresql_password": o_database["postgres"]["credentials"][
-                "api"
-            ]["password"],
-            "pdns_postgresql_port": o_database["postgres"]["port"],
-            "pdns_postgresql_host": o_database["postgres"]["hostname"],
-            "pdns_postgresql_dbname": o_database["postgres"]["credentials"]["dns"][
-                "database"
-            ],
-            "pdns_postgresql_user": o_database["postgres"]["credentials"]["dns"][
-                "username"
-            ],
-            "pdns_postgresql_password": o_database["postgres"]["credentials"]["dns"][
-                "password"
-            ],
-        }
-        config = {**config, **config_database}
-
-        o_timer = o_config["timer"]
-        config_timer = {
-            "vm_shutdown_timeout": int(o_timer.get("vm_shutdown_timeout", 180)),
-            "keepalive_interval": int(o_timer.get("keepalive_interval", 5)),
-            "monitoring_interval": int(o_timer.get("monitoring_interval", 60)),
-        }
-        config = {**config, **config_timer}
-
-        o_fencing = o_config["fencing"]
-        config_fencing = {
-            "disable_on_ipmi_failure": o_fencing["disable_on_ipmi_failure"],
-            "fence_intervals": int(o_fencing["intervals"].get("fence_intervals", 6)),
-            "suicide_intervals": int(o_fencing["intervals"].get("suicide_interval", 0)),
-            "successful_fence": o_fencing["actions"].get("successful_fence", None),
-            "failed_fence": o_fencing["actions"].get("failed_fence", None),
-            "ipmi_hostname": o_fencing["ipmi"]["hostname"].format(node_id=node_id),
-            "ipmi_username": o_fencing["ipmi"]["username"],
-            "ipmi_password": o_fencing["ipmi"]["password"],
-        }
-        config = {**config, **config_fencing}
-
-        o_migration = o_config["migration"]
-        config_migration = {
-            "migration_target_selector": o_migration.get("target_selector", "mem"),
-        }
-        config = {**config, **config_migration}
-
-        o_logging = o_config["logging"]
-        config_logging = {
-            "debug": o_logging.get("debug_logging", False),
-            "file_logging": o_logging.get("file_logging", False),
-            "stdout_logging": o_logging.get("stdout_logging", False),
-            "zookeeper_logging": o_logging.get("zookeeper_logging", False),
-            "log_colours": o_logging.get("log_colours", False),
-            "log_dates": o_logging.get("log_dates", False),
-            "log_keepalives": o_logging.get("log_keepalives", False),
-            "log_monitoring_details": o_logging.get("log_monitoring_details", False),
-            "console_log_lines": o_logging.get("console_log_lines", False),
-            "node_log_lines": o_logging.get("node_log_lines", False),
-        }
-        config = {**config, **config_logging}
-
-        o_guest_networking = o_config["guest_networking"]
-        config_guest_networking = {
-            "bridge_dev": o_guest_networking["bridge_device"],
-            "bridge_mtu": o_guest_networking["bridge_mtu"],
-            "enable_sriov": o_guest_networking.get("sriov_enable", False),
-            "sriov_device": o_guest_networking.get("sriov_device", list()),
-        }
-        config = {**config, **config_guest_networking}
-
-        o_ceph = o_config["ceph"]
-        config_ceph = {
-            "ceph_config_file": config["ceph_directory"]
-            + "/"
-            + o_ceph["ceph_config_file"],
-            "ceph_admin_keyring": config["ceph_directory"]
-            + "/"
-            + o_ceph["ceph_keyring_file"],
-            "ceph_monitor_port": o_ceph["monitor_port"],
-            "ceph_secret_uuid": o_ceph["secret_uuid"],
-        }
-        config = {**config, **config_ceph}
-
-        # Add our node static data to the config
-        config["static_data"] = get_static_data()
-
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    return config
-
-
-def get_configuration_legacy(pvcnoded_config_file):
-    print('Loading configuration from file "{}"'.format(pvcnoded_config_file))
-
-    with open(pvcnoded_config_file, "r") as cfgfile:
-        try:
-            o_config = yaml.load(cfgfile, Loader=yaml.SafeLoader)
-        except Exception as e:
-            print("ERROR: Failed to parse configuration file: {}".format(e))
-            os._exit(1)
-
-    node_fqdn, node_hostname, node_domain, node_id = get_hostname()
-
-    # Create the configuration dictionary
-    config = dict()
-
-    # Get the initial base configuration
-    try:
-        o_base = o_config["pvc"]
-        o_cluster = o_config["pvc"]["cluster"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_general = {
-        "node": o_base.get("node", node_hostname),
-        "node_hostname": node_hostname,
-        "node_fqdn": node_fqdn,
-        "node_domain": node_domain,
-        "node_id": node_id,
-        "coordinators": o_cluster.get("coordinators", list()),
-        "debug": o_base.get("debug", False),
-    }
-
-    config = {**config, **config_general}
-
-    # Get the functions configuration
-    try:
-        o_functions = o_config["pvc"]["functions"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_functions = {
-        "enable_hypervisor": o_functions.get("enable_hypervisor", False),
-        "enable_networking": o_functions.get("enable_networking", False),
-        "enable_storage": o_functions.get("enable_storage", False),
-        "enable_worker": o_functions.get("enable_worker", True),
-        "enable_api": o_functions.get("enable_api", False),
-    }
-
-    config = {**config, **config_functions}
-
-    # Get the directory configuration
-    try:
-        o_directories = o_config["pvc"]["system"]["configuration"]["directories"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_directories = {
-        "plugin_directory": o_directories.get(
-            "plugin_directory", "/usr/share/pvc/plugins"
-        ),
-        "dynamic_directory": o_directories.get("dynamic_directory", None),
-        "log_directory": o_directories.get("log_directory", None),
-        "console_log_directory": o_directories.get("console_log_directory", None),
-    }
-
-    # Define our dynamic directory schema
-    config_directories["dnsmasq_dynamic_directory"] = (
-        config_directories["dynamic_directory"] + "/dnsmasq"
-    )
-    config_directories["pdns_dynamic_directory"] = (
-        config_directories["dynamic_directory"] + "/pdns"
-    )
-    config_directories["nft_dynamic_directory"] = (
-        config_directories["dynamic_directory"] + "/nft"
-    )
-
-    # Define our log directory schema
-    config_directories["dnsmasq_log_directory"] = (
-        config_directories["log_directory"] + "/dnsmasq"
-    )
-    config_directories["pdns_log_directory"] = (
-        config_directories["log_directory"] + "/pdns"
-    )
-    config_directories["nft_log_directory"] = (
-        config_directories["log_directory"] + "/nft"
-    )
-
-    config = {**config, **config_directories}
-
-    # Get the logging configuration
-    try:
-        o_logging = o_config["pvc"]["system"]["configuration"]["logging"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_logging = {
-        "file_logging": o_logging.get("file_logging", False),
-        "stdout_logging": o_logging.get("stdout_logging", False),
-        "zookeeper_logging": o_logging.get("zookeeper_logging", False),
-        "log_colours": o_logging.get("log_colours", False),
-        "log_dates": o_logging.get("log_dates", False),
-        "log_keepalives": o_logging.get("log_keepalives", False),
-        "log_monitoring_details": o_logging.get("log_keepalive_plugin_details", False),
-        "console_log_lines": o_logging.get("console_log_lines", False),
-        "node_log_lines": o_logging.get("node_log_lines", False),
-    }
-
-    config = {**config, **config_logging}
-
-    # Get the interval configuration
-    try:
-        o_intervals = o_config["pvc"]["system"]["intervals"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_intervals = {
-        "vm_shutdown_timeout": int(o_intervals.get("vm_shutdown_timeout", 60)),
-        "keepalive_interval": int(o_intervals.get("keepalive_interval", 5)),
-        "monitoring_interval": int(o_intervals.get("monitoring_interval", 60)),
-        "fence_intervals": int(o_intervals.get("fence_intervals", 6)),
-        "suicide_intervals": int(o_intervals.get("suicide_interval", 0)),
-    }
-
-    config = {**config, **config_intervals}
-
-    # Get the fencing configuration
-    try:
-        o_fencing = o_config["pvc"]["system"]["fencing"]
-        o_fencing_actions = o_fencing["actions"]
-        o_fencing_ipmi = o_fencing["ipmi"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_fencing = {
-        "successful_fence": o_fencing_actions.get("successful_fence", None),
-        "failed_fence": o_fencing_actions.get("failed_fence", None),
-        "ipmi_hostname": o_fencing_ipmi.get(
-            "host", f"{node_hostname}-lom.{node_domain}"
-        ),
-        "ipmi_username": o_fencing_ipmi.get("user", "null"),
-        "ipmi_password": o_fencing_ipmi.get("pass", "null"),
-    }
-
-    config = {**config, **config_fencing}
-
-    # Get the migration configuration
-    try:
-        o_migration = o_config["pvc"]["system"]["migration"]
-    except Exception as e:
-        raise MalformedConfigurationError(e)
-
-    config_migration = {
-        "migration_target_selector": o_migration.get("target_selector", "mem"),
-    }
-
-    config = {**config, **config_migration}
-
-    if config["enable_networking"]:
-        # Get the node networks configuration
-        try:
-            o_networks = o_config["pvc"]["cluster"]["networks"]
-            o_network_cluster = o_networks["cluster"]
-            o_network_storage = o_networks["storage"]
-            o_network_upstream = o_networks["upstream"]
-            o_sysnetworks = o_config["pvc"]["system"]["configuration"]["networking"]
-            o_sysnetwork_cluster = o_sysnetworks["cluster"]
-            o_sysnetwork_storage = o_sysnetworks["storage"]
-            o_sysnetwork_upstream = o_sysnetworks["upstream"]
-        except Exception as e:
-            raise MalformedConfigurationError(e)
-
-        config_networks = {
-            "cluster_domain": o_network_cluster.get("domain", None),
-            "cluster_network": o_network_cluster.get("network", None),
-            "cluster_floating_ip": o_network_cluster.get("floating_ip", None),
-            "cluster_dev": o_sysnetwork_cluster.get("device", None),
-            "cluster_mtu": o_sysnetwork_cluster.get("mtu", None),
-            "cluster_dev_ip": o_sysnetwork_cluster.get("address", None),
-            "storage_domain": o_network_storage.get("domain", None),
-            "storage_network": o_network_storage.get("network", None),
-            "storage_floating_ip": o_network_storage.get("floating_ip", None),
-            "storage_dev": o_sysnetwork_storage.get("device", None),
-            "storage_mtu": o_sysnetwork_storage.get("mtu", None),
-            "storage_dev_ip": o_sysnetwork_storage.get("address", None),
-            "upstream_domain": o_network_upstream.get("domain", None),
-            "upstream_network": o_network_upstream.get("network", None),
-            "upstream_floating_ip": o_network_upstream.get("floating_ip", None),
-            "upstream_gateway": o_network_upstream.get("gateway", None),
-            "upstream_dev": o_sysnetwork_upstream.get("device", None),
-            "upstream_mtu": o_sysnetwork_upstream.get("mtu", None),
-            "upstream_dev_ip": o_sysnetwork_upstream.get("address", None),
-            "bridge_dev": o_sysnetworks.get("bridge_device", None),
-            "bridge_mtu": o_sysnetworks.get("bridge_mtu", None),
-            "enable_sriov": o_sysnetworks.get("sriov_enable", False),
-            "sriov_device": o_sysnetworks.get("sriov_device", list()),
-        }
-
-        if config_networks["bridge_mtu"] is None:
-            # Read the current MTU of bridge_dev and set bridge_mtu to it; avoids weird resets
-            retcode, stdout, stderr = common.run_os_command(
-                f"ip -json link show dev {config_networks['bridge_dev']}"
-            )
-            current_bridge_mtu = loads(stdout)[0]["mtu"]
-            print(
-                f"Config key bridge_mtu not explicitly set; using live MTU {current_bridge_mtu} from {config_networks['bridge_dev']}"
-            )
-            config_networks["bridge_mtu"] = current_bridge_mtu
-
-        config = {**config, **config_networks}
-
-        for network_type in ["cluster", "storage", "upstream"]:
-            result, msg = validate_floating_ip(config, network_type)
-            if not result:
-                raise MalformedConfigurationError(msg)
-
-            address_key = "{}_dev_ip".format(network_type)
-            network_key = f"{network_type}_network"
-            network = ip_network(config[network_key])
-            # With autoselection of addresses, construct an IP from the relevant network
-            if config[address_key] == "by-id":
-                # The NodeID starts at 1, but indexes start at 0
-                address_id = int(config["node_id"]) - 1
-                # Grab the nth address from the network
-                config[address_key] = "{}/{}".format(
-                    list(network.hosts())[address_id], network.prefixlen
-                )
-            # Validate the provided IP instead
-            else:
-                try:
-                    address = ip_address(config[address_key].split("/")[0])
-                    if address not in list(network.hosts()):
-                        raise
-                except Exception:
-                    raise MalformedConfigurationError(
-                        f"IP address {config[address_key]} for {address_key} is not valid"
-                    )
-
-        # Get the PowerDNS aggregator database configuration
-        try:
-            o_pdnsdb = o_config["pvc"]["coordinator"]["dns"]["database"]
-        except Exception as e:
-            raise MalformedConfigurationError(e)
-
-        config_pdnsdb = {
-            "pdns_postgresql_host": o_pdnsdb.get("host", None),
-            "pdns_postgresql_port": o_pdnsdb.get("port", None),
-            "pdns_postgresql_dbname": o_pdnsdb.get("name", None),
-            "pdns_postgresql_user": o_pdnsdb.get("user", None),
-            "pdns_postgresql_password": o_pdnsdb.get("pass", None),
-        }
-
-        config = {**config, **config_pdnsdb}
-
-        # Get the Cloud-Init Metadata database configuration
-        try:
-            o_metadatadb = o_config["pvc"]["coordinator"]["metadata"]["database"]
-        except Exception as e:
-            raise MalformedConfigurationError(e)
-
-        config_metadatadb = {
-            "metadata_postgresql_host": o_metadatadb.get("host", None),
-            "metadata_postgresql_port": o_metadatadb.get("port", None),
-            "metadata_postgresql_dbname": o_metadatadb.get("name", None),
-            "metadata_postgresql_user": o_metadatadb.get("user", None),
-            "metadata_postgresql_password": o_metadatadb.get("pass", None),
-        }
-
-        config = {**config, **config_metadatadb}
-
-    if config["enable_storage"]:
-        # Get the storage configuration
-        try:
-            o_storage = o_config["pvc"]["system"]["configuration"]["storage"]
-        except Exception as e:
-            raise MalformedConfigurationError(e)
-
-        config_storage = {
-            "ceph_config_file": o_storage.get("ceph_config_file", None),
-            "ceph_admin_keyring": o_storage.get("ceph_admin_keyring", None),
-        }
-
-        config = {**config, **config_storage}
-
-    # Add our node static data to the config
-    config["static_data"] = get_static_data()
-
-    return config
-
-
-def get_configuration():
-    """
-    Parse the configuration of the node daemon.
-    """
-    pvc_config_file, pvc_config_type = get_configuration_path()
-
-    if pvc_config_type == "legacy":
-        config = get_configuration_legacy(pvc_config_file)
-    else:
-        config = get_configuration_current(pvc_config_file)
-
-    return config
-
-
-def validate_directories(config):
-    if not os.path.exists(config["dynamic_directory"]):
-        os.makedirs(config["dynamic_directory"])
-        os.makedirs(config["dnsmasq_dynamic_directory"])
-        os.makedirs(config["pdns_dynamic_directory"])
-        os.makedirs(config["nft_dynamic_directory"])
-
-    if not os.path.exists(config["log_directory"]):
-        os.makedirs(config["log_directory"])
-        os.makedirs(config["dnsmasq_log_directory"])
-        os.makedirs(config["pdns_log_directory"])
-        os.makedirs(config["nft_log_directory"])
@@ -20,7 +20,6 @@
 ###############################################################################
 
 import pvcnoded.util.keepalive
-import pvcnoded.util.config
 import pvcnoded.util.fencing
 import pvcnoded.util.networking
 import pvcnoded.util.services
@@ -35,6 +34,7 @@ import pvcnoded.objects.VXNetworkInstance as VXNetworkInstance
 import pvcnoded.objects.SRIOVVFInstance as SRIOVVFInstance
 import pvcnoded.objects.CephInstance as CephInstance
 
+import daemon_lib.config as cfg
 import daemon_lib.log as log
 import daemon_lib.common as common
 
@@ -60,12 +60,12 @@ def entrypoint():
     keepalive_timer = None
 
     # Get our configuration
-    config = pvcnoded.util.config.get_configuration()
+    config = cfg.get_configuration()
     config["daemon_name"] = "pvcnoded"
    config["daemon_version"] = version
 
     # Create and validate our directories
-    pvcnoded.util.config.validate_directories(config)
+    cfg.validate_directories(config)
 
     # Set up the logger instance
     logger = log.Logger(config)
@@ -131,11 +131,11 @@ class MetadataAPIInstance(object):
     # Helper functions
     def open_database(self):
        conn = psycopg2.connect(
-            host=self.config["metadata_postgresql_host"],
-            port=self.config["metadata_postgresql_port"],
-            dbname=self.config["metadata_postgresql_dbname"],
-            user=self.config["metadata_postgresql_user"],
-            password=self.config["metadata_postgresql_password"],
+            host=self.config["api_postgresql_host"],
+            port=self.config["api_postgresql_port"],
+            dbname=self.config["api_postgresql_dbname"],
+            user=self.config["api_postgresql_user"],
+            password=self.config["api_postgresql_password"],
        )
        cur = conn.cursor(cursor_factory=RealDictCursor)
        return conn, cur