pvc-ansible/roles/pvc/templates/libvirt/libvirtd.service.j2

# NB: we don't use socket activation. When libvirtd starts it will
# spawn any virtual machines registered for autostart. We want this
# to occur on every boot, regardless of whether any client connects
# to a socket, so socket activation doesn't have any benefit.
[Unit]
Description=Virtualization daemon
Requires=virtlogd.socket
Requires=virtlockd.socket
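# machined is only a weak dependency; libvirt uses it to register running guests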
Wants=systemd-machined.service
Before=libvirt-guests.service
After=network.target
After=dbus.service
After=iscsid.service
After=apparmor.service
After=local-fs.target
After=remote-fs.target
After=systemd-logind.service
After=systemd-machined.service
Documentation=man:libvirtd(8)
Documentation=https://libvirt.org

[Service]
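# Type=notify: libvirtd signals readiness to systemd via sd_notify(3)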
Type=notify
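# The leading '-' means a missing defaults file is ignored rather than an error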
EnvironmentFile=-/etc/default/libvirtd
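# $libvirtd_opts, if set, comes from the defaults file above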
ExecStart=/usr/sbin/libvirtd $libvirtd_opts
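# Reload is implemented by sending SIGHUP to the main daemon process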
ExecReload=/bin/kill -HUP $MAINPID
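# Stop only the daemon process itself; other processes in its cgroup
# (e.g. running guests) are left untouched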
KillMode=process
Restart=on-failure
# At least 1 FD per guest, often 2 (eg qemu monitor + qemu agent).
# e.g. if we want to support 4096 guests, we'll typically need 8192 FDs.
# If changing this, also consider virtlogd.service & virtlockd.service
# limits, which are also related to the number of guests.
LimitNOFILE=8192
# The cgroups pids controller can limit the number of tasks started by
# the daemon, which can limit the number of domains for some hypervisors.
# A conservative default of 8 tasks per guest results in a TasksMax of
# 32k to support 4096 guests.
TasksMax=32768

[Install]
WantedBy=multi-user.target
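# Enabling/disabling this service also enables/disables the log and lock sockets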
Also=virtlockd.socket
Also=virtlogd.socket