Compare commits

...

4 Commits

Author SHA1 Message Date
2c15036f86 Add KeyDB to node startup services
Also ensure API worker starts on all nodes, not just coordinators.
2023-11-05 19:26:38 -05:00
42ed6f6420 Remove redis as a dependency 2023-11-05 18:23:34 -05:00
3dc1f57de2 Revert "Switch to ZK+PG over Redis for Celery queue"
This reverts commit 54215bab6c.
2023-11-05 17:10:46 -05:00
b99b4e64b2 Ensure store path is passed properly 2023-11-05 16:48:47 -05:00
6 changed files with 34 additions and 28 deletions

View File

@@ -45,8 +45,8 @@ pvc:
     provisioner:
         # database: Backend database configuration
        database:
-            # host: PostgreSQL hostname, the primary coordinator floating IP in the cluster network
-            host: pvchvP
+            # host: PostgreSQL hostname, usually 'localhost'
+            host: localhost
             # port: PostgreSQL port, invariably '5432'
             port: 5432
             # name: PostgreSQL database name, invariably 'pvcapi'
@@ -55,6 +55,14 @@ pvc:
             user: pvcapi
             # pass: PostgreSQL user password, randomly generated
             pass: pvcapi
+        # queue: Celery backend queue using Redis/KeyDB
+        queue:
+            # host: Redis hostname, usually 'localhost'
+            host: localhost
+            # port: Redis port, invariably '6379'
+            port: 6379
+            # path: Redis queue path, invariably '/0'
+            path: /0
         # ceph_cluster: Information about the Ceph storage cluster
         ceph_cluster:
             # storage_hosts: The list of hosts that the Ceph monitors are valid on; if empty (the default),
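
A quick way to sanity-check the restored queue settings (an illustrative sketch only, not part of this changeset; the config path and the use of the python3-redis client are assumptions):

import yaml
import redis

# Load the provisioner queue settings from the API daemon config (path assumed)
with open("/etc/pvc/pvcapid.yaml") as fh:
    queue = yaml.safe_load(fh)["pvc"]["provisioner"]["queue"]

# KeyDB speaks the Redis protocol, so the standard redis client can verify it
conn = redis.Redis(
    host=queue["host"],
    port=queue["port"],
    db=int(str(queue["path"]).lstrip("/")),
)
print(conn.ping())  # True if the KeyDB/Redis queue backend is reachable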

View File

@@ -90,6 +90,9 @@ try:
         "database_name": o_config["pvc"]["provisioner"]["database"]["name"],
         "database_user": o_config["pvc"]["provisioner"]["database"]["user"],
         "database_password": o_config["pvc"]["provisioner"]["database"]["pass"],
+        "queue_host": o_config["pvc"]["provisioner"]["queue"]["host"],
+        "queue_port": o_config["pvc"]["provisioner"]["queue"]["port"],
+        "queue_path": o_config["pvc"]["provisioner"]["queue"]["path"],
         "storage_hosts": o_config["pvc"]["provisioner"]["ceph_cluster"][
             "storage_hosts"
         ],

View File

@@ -43,23 +43,11 @@ from flask_sqlalchemy import SQLAlchemy
 # Create Flask app and set config values
 app = flask.Flask(__name__)
 
 # Set up Celery queue
-# Default Zookeeper port; not configurable
-queue_port = 2181
-# Default Zookeeper path, not configurable
-queue_path = "/apibroker"
-# Join the coordinator hostnames with the queue port, semicolon separated for Celery
-queue_hostport_pairs = ";".join([f"{h}:{queue_port}" for h in config["coordinators"]])
-app.config["CELERY_BROKER_URL"] = "zookeeper://{}{}".format(
-    queue_hostport_pairs, queue_path
+app.config["CELERY_BROKER_URL"] = "redis://{}:{}{}".format(
+    config["queue_host"], config["queue_port"], config["queue_path"]
 )
-app.config["CELERY_RESULT_BACKEND"] = "db+postgresql://{}:{}@{}:{}/{}".format(
-    config["database_user"],
-    config["database_password"],
-    config["database_host"],
-    config["database_port"],
-    config["database_name"],
+app.config["CELERY_RESULT_BACKEND"] = "redis://{}:{}{}".format(
+    config["queue_host"], config["queue_port"], config["queue_path"]
 )
 
 # Set up Celery queues
@@ -111,13 +99,11 @@ app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://{}:{}@{}:{}/{}".format(
     config["database_name"],
 )
 
 # Set up debugging
 if config["debug"]:
     app.config["DEBUG"] = True
 else:
     app.config["DEBUG"] = False
 
 # Set up authentication
 if config["auth_enabled"]:
     app.config["SECRET_KEY"] = config["auth_secret_key"]

View File

@@ -5664,7 +5664,7 @@ def cli_connection_add(
     scheme = "https" if ssl_flag else "http"
 
     # Get the store data
-    connections_config = get_store(store_path)
+    connections_config = get_store(CLI_CONFIG["store_path"])
 
     # Add (or update) the new connection details
     connections_config[name] = {
@@ -5676,7 +5676,7 @@
     }
 
     # Update the store data
-    update_store(store_path, connections_config)
+    update_store(CLI_CONFIG["store_path"], connections_config)
 
     finish(
         True,
@@ -5700,7 +5700,7 @@
     """
 
     # Get the store data
-    connections_config = get_store(store_path)
+    connections_config = get_store(CLI_CONFIG["store_path"])
 
     # Remove the entry matching the name
     try:
@@ -5709,7 +5709,7 @@
         finish(False, f"""No connection found with name "{name}" in local database""")
 
     # Update the store data
-    update_store(store_path, connections_config)
+    update_store(CLI_CONFIG["store_path"], connections_config)
 
     finish(True, f"""Removed connection "{name}" from client database""")
@@ -5752,7 +5752,7 @@
         "json-pretty": Output in formatted JSON.
     """
 
-    connections_config = get_store(store_path)
+    connections_config = get_store(CLI_CONFIG["store_path"])
     connections_data = cli_connection_list_parser(connections_config, show_keys_flag)
     finish(True, connections_data, format_function)
@@ -5790,7 +5790,7 @@
         newline=False,
         stderr=True,
     )
-    connections_config = get_store(store_path)
+    connections_config = get_store(CLI_CONFIG["store_path"])
     connections_data = cli_connection_detail_parser(connections_config)
     echo(CLI_CONFIG, "done.", stderr=True)
     echo(CLI_CONFIG, "", stderr=True)
@@ -5930,6 +5930,7 @@ def cli(
     CLI_CONFIG["colour"] = _colour
     CLI_CONFIG["quiet"] = _quiet
     CLI_CONFIG["silent"] = _silent
+    CLI_CONFIG["store_path"] = store_path
 
     audit()
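
The pattern behind this fix, as a simplified sketch (get_store and the on-disk layout here are stand-ins, not the real client code): the top-level cli() group records its store path once in the shared CLI_CONFIG dict, so the connection subcommands read CLI_CONFIG["store_path"] instead of a store_path name that is not in their scope.

import json
import os

CLI_CONFIG = {}

def cli(store_path):
    # Top-level command group: stash the option once for every subcommand to reuse
    CLI_CONFIG["store_path"] = store_path

def get_store(store_path):
    # Stand-in for the real store loader; reads the local connection database
    with open(os.path.join(store_path, "pvc-cli.json")) as fh:
        return json.load(fh)

def cli_connection_list():
    # Subcommand: resolve the path via CLI_CONFIG rather than a bare store_path
    return get_store(CLI_CONFIG["store_path"])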

debian/control (vendored)
View File

@@ -16,7 +16,7 @@ Description: Parallel Virtual Cluster node daemon (Python 3)
 
 Package: pvc-daemon-api
 Architecture: all
-Depends: systemd, pvc-daemon-common, python3-yaml, python3-flask, python3-flask-restful, python3-celery, python-celery-common, python3-distutils, python3-lxml, python3-flask-migrate, fio
+Depends: systemd, pvc-daemon-common, python3-yaml, python3-flask, python3-flask-restful, python3-celery, python-celery-common, python3-distutils, python3-redis, python3-lxml, python3-flask-migrate, fio
 Description: Parallel Virtual Cluster API daemon (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .

View File

@@ -69,8 +69,15 @@ def start_ceph_mgr(logger, config):
         )
 
 
-def start_api_worker(logger, config):
+def start_keydb(logger, config):
     if config["enable_api"] and config["daemon_mode"] == "coordinator":
+        logger.out("Starting KeyDB daemon", state="i")
+        # TODO: Move our handling out of Systemd and integrate it directly as a subprocess?
+        common.run_os_command("systemctl start keydb-server.service")
+
+
+def start_api_worker(logger, config):
+    if config["enable_api"]:
         logger.out("Starting API worker daemon", state="i")
         # TODO: Move our handling out of Systemd and integrate it directly as a subprocess?
         common.run_os_command("systemctl start pvcapid-worker.service")
@@ -83,6 +90,7 @@ def start_system_services(logger, config):
     start_frrouting(logger, config)
     start_ceph_mon(logger, config)
     start_ceph_mgr(logger, config)
+    start_keydb(logger, config)
     start_api_worker(logger, config)
 
     logger.out("Waiting 10 seconds for daemons to start", state="s")