Compare commits: 2545a7b744...74a416165d

8 Commits

Author | SHA1 | Date
---|---|---
 | 74a416165d |
 | 83ceb41138 |
 | 2e5958640a |
 | d65b18f15b |
 | 7abc697c8a |
 | 1adc3674b6 |
 | bd811408f9 |
 | 6090b286fe |

@@ -46,14 +46,20 @@ $SUDO dpkg -i ../pvc-client*_${version}*.deb &>/dev/null
echo " done".

for HOST in ${HOSTS[@]}; do
    echo "> Deploying packages to host ${HOST}"
    echo -n "Copying packages..."
    echo -n "Copying packages to host ${HOST}..."
    ssh $HOST $SUDO rm -rf /tmp/pvc &>/dev/null
    ssh $HOST mkdir /tmp/pvc &>/dev/null
    scp ../pvc-*_${version}*.deb $HOST:/tmp/pvc/ &>/dev/null
    echo " done."
done
if [[ -z ${KEEP_ARTIFACTS} ]]; then
    rm ../pvc*_${version}*
fi

for HOST in ${HOSTS[@]}; do
    echo "> Deploying packages to host ${HOST}"
    echo -n "Installing packages..."
    ssh $HOST $SUDO dpkg -i /tmp/pvc/{pvc-client-cli,pvc-daemon-common,pvc-daemon-api,pvc-daemon-node}*.deb &>/dev/null
    ssh $HOST $SUDO dpkg -i /tmp/pvc/*.deb &>/dev/null
    ssh $HOST rm -rf /tmp/pvc &>/dev/null
    echo " done."
    echo -n "Restarting PVC daemons..."

@@ -68,8 +74,5 @@ for HOST in ${HOSTS[@]}; do
    done
    echo " done."
done
if [[ -z ${KEEP_ARTIFACTS} ]]; then
    rm ../pvc*_${version}*
fi

popd &>/dev/null

@@ -44,7 +44,7 @@ DEFAULT_STORE_DATA = {"cfgfile": "/etc/pvc/pvcapid.yaml"}
DEFAULT_STORE_FILENAME = "pvc.json"
DEFAULT_API_PREFIX = "/api/v1"
DEFAULT_NODE_HOSTNAME = gethostname().split(".")[0]
DEFAULT_AUTOBACKUP_FILENAME = "/etc/pvc/autobackup.yaml"
DEFAULT_AUTOBACKUP_FILENAME = "/etc/pvc/pvc.conf"
MAX_CONTENT_WIDTH = 120

@@ -21,6 +21,8 @@

import time

from collections import deque

import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import call_api

@@ -107,7 +109,9 @@ def follow_node_log(config, node, lines=10):
    API schema: {"name":"{nodename}","data":"{node_log}"}
    """
    # We always grab 200 to match the follow call, but only _show_ `lines` number
    params = {"lines": 200}
    max_lines = 200

    params = {"lines": max_lines}
    response = call_api(
        config, "get", "/node/{node}/log".format(node=node), params=params
    )

@@ -117,43 +121,50 @@ def follow_node_log(config, node, lines=10):

    # Shrink the log buffer to length lines
    node_log = response.json()["data"]
    shrunk_log = node_log.split("\n")[-int(lines) :]
    loglines = "\n".join(shrunk_log)
    full_log = node_log.split("\n")
    shrunk_log = full_log[-int(lines) :]

    # Print the initial data and begin following
    print(loglines, end="")
    print("\n", end="")
    for line in shrunk_log:
        print(line)

    # Create the deque we'll use to buffer loglines
    loglines = deque(full_log, max_lines)

    while True:
        # Wait half a second
        time.sleep(0.5)

        # Grab the next line set (200 is a reasonable number of lines per half-second; any more are skipped)
        try:
            params = {"lines": 200}
            params = {"lines": max_lines}
            response = call_api(
                config, "get", "/node/{node}/log".format(node=node), params=params
            )
            new_node_log = response.json()["data"]
        except Exception:
            break

        # Split the new and old log strings into constituent lines
        old_node_loglines = node_log.split("\n")
        new_node_loglines = new_node_log.split("\n")

        # Set the node log to the new log value for the next iteration
        node_log = new_node_log
        # Find where in the new lines the last entry in the ongoing deque is
        start_idx = 0
        for idx, line in enumerate(new_node_loglines):
            if line == loglines[-1]:
                start_idx = idx

        # Get the difference between the two sets of lines
        old_node_loglines_set = set(old_node_loglines)
        diff_node_loglines = [
            x for x in new_node_loglines if x not in old_node_loglines_set
        ]
        # Get the new lines starting from the found index plus one
        diff_node_loglines = new_node_loglines[start_idx + 1 :]

        # If there's a difference, print it out
        # If there's a difference, add the lines to the ongoing deque and print them out
        if len(diff_node_loglines) > 0:
            print("\n".join(diff_node_loglines), end="")
            print("\n", end="")
            for line in diff_node_loglines:
                loglines.append(line)
                print(line)

        # Wait half a second
        time.sleep(0.5)
        del new_node_loglines
        del diff_node_loglines

    return True, ""

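The reworked follow loop keeps a bounded deque of the lines it has already emitted and, on each poll, prints only what comes after the last remembered line. A minimal standalone sketch of that pattern, assuming a hypothetical `fetch_lines()` helper in place of the `call_api` request:

```python
import time
from collections import deque


def follow(fetch_lines, shown_lines=10, max_lines=200, poll_interval=0.5):
    # fetch_lines(n) is a hypothetical callable returning the newest n log
    # lines as a list of strings, oldest first (standing in for call_api).
    full_log = fetch_lines(max_lines)

    # Show only the last `shown_lines` entries initially
    for line in full_log[-shown_lines:]:
        print(line)

    # Bounded buffer of everything we have already seen
    seen = deque(full_log, max_lines)

    while True:
        time.sleep(poll_interval)
        try:
            new_log = fetch_lines(max_lines)
        except Exception:
            break

        # Find where the last line we remember sits in the new snapshot
        start_idx = 0
        for idx, line in enumerate(new_log):
            if seen and line == seen[-1]:
                start_idx = idx

        # Everything after that index is new: buffer it and print it
        for line in new_log[start_idx + 1:]:
            seen.append(line)
            print(line)
```

The deque bound keeps memory constant no matter how long the follow runs; lines that scroll past faster than one fetch window can cover are simply never shown, as the in-line comment in the diff notes.
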
@@ -76,7 +76,9 @@ class Logger(object):
        self.config = config

        if self.config["file_logging"]:
            self.logfile = self.config["log_directory"] + "/pvc.log"
            self.logfile = (
                self.config["log_directory"] + "/" + self.config["daemon_name"] + ".log"
            )
            # We open the logfile for the duration of our session, but have a hup function
            self.writer = open(self.logfile, "a", buffering=0)

@@ -98,7 +100,7 @@ class Logger(object):
        if self.config["file_logging"]:
            self.writer.close()
        if self.config["zookeeper_logging"]:
            self.out("Waiting 15s for Zookeeper message queue to drain", state="s")
            self.out("Waiting for Zookeeper message queue to drain", state="s")

            tick_count = 0
            while not self.zookeeper_queue.empty():

@@ -139,20 +141,25 @@ class Logger(object):
        if prefix != "":
            prefix = prefix + " - "

        # Assemble message string
        message = colour + prompt + endc + date + prefix + message

        # Log to stdout
        if self.config["stdout_logging"]:
            print(message)
            # Assemble output string
            output = colour + prompt + endc + date + prefix + message
            print(output)

        # Log to file
        if self.config["file_logging"]:
            self.writer.write(message + "\n")
            # Assemble output string
            output = colour + prompt + endc + date + prefix + message
            self.writer.write(output + "\n")

        # Log to Zookeeper
        if self.config["zookeeper_logging"]:
            self.zookeeper_queue.put(message)
            # Set the daemon value (only used here as others do not overlap with different daemons)
            daemon = f"{self.config['daemon_name']}: "
            # Assemble output string
            output = daemon + colour + prompt + endc + date + prefix + message
            self.zookeeper_queue.put(output)

        # Set last message variables
        self.last_colour = colour

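The reworked `out()` assembles a separate output string per destination, so only the Zookeeper copy carries the daemon-name prefix while stdout and the logfile lines are unchanged. A condensed sketch of that layout; the `emit()` wrapper and its argument list are illustrative, not the actual method signature:

```python
def emit(config, zookeeper_queue, writer, colour, prompt, endc, date, prefix, message):
    # Base formatting shared by every destination
    body = colour + prompt + endc + date + prefix + message

    # stdout gets the plain formatted line
    if config["stdout_logging"]:
        print(body)

    # The logfile gets the same line plus a trailing newline
    if config["file_logging"]:
        writer.write(body + "\n")

    # Zookeeper log entries are shared between daemons, so only this copy
    # carries the daemon name prefix
    if config["zookeeper_logging"]:
        zookeeper_queue.put(f"{config['daemon_name']}: " + body)
```
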
@@ -198,16 +205,9 @@ class ZookeeperLogger(Thread):
        self.zkhandler.write([("base.logs", ""), (("logs", self.node), "")])

    def run(self):
        while not self.connected:
            self.start_zkhandler()
            sleep(1)

        self.running = True
        # Get the logs that are currently in Zookeeper and populate our deque
        raw_logs = self.zkhandler.read(("logs.messages", self.node))
        if raw_logs is None:
            raw_logs = ""
        logs = deque(raw_logs.split("\n"), self.max_lines)

        while self.running:
            # Get a new message
            try:

@@ -222,25 +222,26 @@ class ZookeeperLogger(Thread):
                date = "{} ".format(datetime.now().strftime("%Y/%m/%d %H:%M:%S.%f"))
            else:
                date = ""

            try:
                with self.zkhandler.writelock(("logs.messages", self.node)):
                    # Get the logs that are currently in Zookeeper and populate our deque
                    cur_logs = self.zkhandler.read(("logs.messages", self.node))
                    if cur_logs is None:
                        cur_logs = ""

                    logs = deque(cur_logs.split("\n"), self.max_lines - 1)

                    # Add the message to the deque
                    logs.append(f"{date}{message}")

                    tick_count = 0
                    while True:
                        try:
                            # Write the updated messages into Zookeeper
                            self.zkhandler.write(
                                [(("logs.messages", self.node), "\n".join(logs))]
                            )
                            break
                        except Exception:
                            # The write failed (connection loss, etc.) so retry for 15 seconds
                            sleep(0.5)
                            tick_count += 1
                            if tick_count > 30:
                                break
                            else:
                                continue

        return

    def stop(self):

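The write path above takes the Zookeeper write lock, merges the new message into a bounded deque of the existing log lines, and retries the write every half second for up to 15 seconds (30 ticks) before giving up. A minimal sketch of just that bounded-retry step, assuming a hypothetical `write_once()` callable in place of `zkhandler.write`:

```python
import time


def write_with_retry(write_once, payload, tick_interval=0.5, max_ticks=30):
    # Call write_once(payload) until it succeeds or the retry budget
    # (max_ticks * tick_interval, roughly 15 seconds by default) runs out.
    tick_count = 0
    while True:
        try:
            write_once(payload)
            return True
        except Exception:
            # Connection loss or similar: back off briefly and try again
            time.sleep(tick_interval)
            tick_count += 1
            if tick_count > max_ticks:
                return False
```

Returning a boolean instead of raising lets the caller keep the logging thread alive even if the store stays unreachable past the retry budget.
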
debian/control

@@ -9,7 +9,7 @@ X-Python3-Version: >= 3.2
Package: pvc-daemon-node
Architecture: all
Depends: systemd, pvc-daemon-common, python3-kazoo, python3-psutil, python3-apscheduler, python3-libvirt, python3-psycopg2, python3-dnspython, python3-yaml, python3-distutils, python3-rados, python3-gevent, ipmitool, libvirt-daemon-system, arping, vlan, bridge-utils, dnsmasq, nftables, pdns-server, pdns-backend-pgsql
Description: Parallel Virtual Cluster node daemon (Python 3)
Description: Parallel Virtual Cluster node daemon
 A KVM/Zookeeper/Ceph-based VM and private cloud manager
 .
 This package installs the PVC node daemon

@@ -17,7 +17,7 @@ Description: Parallel Virtual Cluster node daemon (Python 3)
Package: pvc-daemon-api
Architecture: all
Depends: systemd, pvc-daemon-common, python3-yaml, python3-flask, python3-flask-restful, python3-celery, python-celery-common, python3-distutils, python3-redis, python3-lxml, python3-flask-migrate, fio
Description: Parallel Virtual Cluster API daemon (Python 3)
Description: Parallel Virtual Cluster API daemon
 A KVM/Zookeeper/Ceph-based VM and private cloud manager
 .
 This package installs the PVC API daemon

@@ -25,7 +25,7 @@ Description: Parallel Virtual Cluster API daemon (Python 3)
Package: pvc-daemon-common
Architecture: all
Depends: python3-kazoo, python3-psutil, python3-click, python3-lxml
Description: Parallel Virtual Cluster common libraries (Python 3)
Description: Parallel Virtual Cluster common libraries
 A KVM/Zookeeper/Ceph-based VM and private cloud manager
 .
 This package installs the common libraries for the daemon and API

@@ -33,7 +33,7 @@ Description: Parallel Virtual Cluster common libraries (Python 3)
Package: pvc-client-cli
Architecture: all
Depends: python3-requests, python3-requests-toolbelt, python3-yaml, python3-lxml, python3-click
Description: Parallel Virtual Cluster CLI client (Python 3)
Description: Parallel Virtual Cluster CLI client
 A KVM/Zookeeper/Ceph-based VM and private cloud manager
 .
 This package installs the PVC API command-line client

@@ -63,7 +63,8 @@ def entrypoint():

    # Get our configuration
    config = pvcnoded.util.config.get_configuration()
    config["pvcnoded_version"] = version
    config["daemon_name"] = "pvcnoded"
    config["daemon_version"] = version

    # Create and validate our directories
    pvcnoded.util.config.validate_directories(config)

@@ -123,7 +123,7 @@ def setup_node(logger, config, zkhandler):
            ),
            (
                ("node.data.pvc_version", config["node_hostname"]),
                config["pvcnoded_version"],
                config["daemon_version"],
            ),
            (
                ("node.ipmi.hostname", config["node_hostname"]),

@@ -159,7 +159,7 @@ def setup_node(logger, config, zkhandler):
            ),
            (
                ("node.data.pvc_version", config["node_hostname"]),
                config["pvcnoded_version"],
                config["daemon_version"],
            ),
            (
                ("node.ipmi.hostname", config["node_hostname"]),