If you’re stuck at the UEFI shell, disable Secure Boot. I ran into this “bug” several times while debugging the issue.
Maybe worth adding to your first post, @Aephir.
Thanks, I’ve added a note about this.
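For the KVM/OVMF case, a quick way to check whether the VM was defined with a Secure Boot firmware build (a sketch; OVMF file names vary by distro):
virsh dumpxml hassos | grep -i -A2 loader
# If the <loader> path points at a Secure Boot build (e.g. something like
# OVMF_CODE.secboot.fd or OVMF_CODE.ms.fd), switch it to the plain build
# with `virsh edit hassos`.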
Thanks Aephir for such a nice guide, and thanks to all the other contributors!
I ran into an issue getting networking to route traffic from within HassOS, and I’m banging my head against the proverbial wall. My computer has two NICs; I assigned one a static IP in bridge mode following your latest guide (and disabled netfilter). Even though the DHCP request reached my router and it handed out an IP, the lease was never visible within the VM. So I manually set the IP with bsr203’s tip, and then I could at least ping the bridge IP from within the VM, but that was it (I can’t ping the gateway). I double-checked that my router isn’t blocking me, and I’m running out of ideas.
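(Concretely, what I ended up with inside the HAOS console is reflected in the nmcli printout further down; a rough sketch of the equivalent one-shot command, in case it helps others:)
nmcli con modify "Home Assistant OS default" ipv4.method manual \
  ipv4.addresses 192.168.1.35/24 ipv4.gateway 192.168.1.1 ipv4.dns 192.168.1.1
nmcli con up "Home Assistant OS default"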
Here’s the Ubuntu Server 20.04 host setup (enp2s0 is the physical network card for the bridge):
# brctl show
bridge name bridge id STP enabled interfaces
br-5f3493e0fbc0 8000.0242d3fbc585 no
br0 8000.245ebe5d2e92 yes enp2s0
vnet0
docker0 8000.0242c14aa5ad no vetha6a3fe5
# ip a | sed 's/..:.. /xx:xx /' | sed 's/:....:....\/64 /:xxxx:xxxx\/64 /'
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:xx:xx brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp2s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master br0 state UP group default qlen 1000
link/ether 24:5e:be:5d:xx:xx brd ff:ff:ff:ff:ff:ff
3: enp3s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 24:5e:be:5d:xx:xx brd ff:ff:ff:ff:ff:ff
inet 192.168.1.31/24 brd 192.168.1.255 scope global dynamic enp3s0
valid_lft 6661sec preferred_lft 6661sec
inet6 fe80::265e:beff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
4: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 24:5e:be:5d:xx:xx brd ff:ff:ff:ff:ff:ff
inet 192.168.1.32/24 brd 192.168.1.255 scope global br0
valid_lft forever preferred_lft forever
inet6 fe80::265e:beff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
5: br-5f3493e0fbc0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:d3:fb:xx:xx brd ff:ff:ff:ff:ff:ff
inet 172.21.0.1/16 brd 172.21.255.255 scope global br-5f3493e0fbc0
valid_lft forever preferred_lft forever
6: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:c1:4a:xx:xx brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:c1ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
8: vetha6a3fe5@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether 4e:a4:fa:d0:xx:xx brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe80::4ca4:faff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
14: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master br0 state UNKNOWN group default qlen 1000
link/ether fe:54:00:ad:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::fc54:ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
# ip route
default via 192.168.1.1 dev br0 proto static
default via 192.168.1.1 dev enp3s0 proto dhcp src 192.168.1.31 metric 100
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
172.21.0.0/16 dev br-5f3493e0fbc0 proto kernel scope link src 172.21.0.1 linkdown
192.168.1.0/24 dev br0 proto kernel scope link src 192.168.1.32
192.168.1.0/24 dev enp3s0 proto kernel scope link src 192.168.1.31
192.168.1.1 dev enp3s0 proto dhcp scope link src 192.168.1.31 metric 100
# virsh net-list
Name State Autostart Persistent
-----------------------------------------
br0 active yes yes
# virt-install --import --name hassio --memory 2048 --vcpus 2 --cpu host --disk haos_ova-7.2.qcow2,format=qcow2,bus=virtio --network bridge=br0,model=virtio --graphics none --noautoconsole --boot uefi
# ip a | sed 's/..:.. /xx:xx /' | sed 's/:....:....\/64 /:xxxx:xxxx\/64 /'
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:xx:xx brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop qlen 1000
link/ether 2e:fc:9d:ac:xx:xx brd ff:ff:ff:ff:ff:ff
3: enp0s2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 52:54:00:ad:xx:xx brd ff:ff:ff:ff:ff:ff
inet 192.168.1.35/24 brd 192.168.1.255 scope global noprefixroute enp0s2
valid_lft forever preferred_lft forever
inet6 fe80::f60:7e1b:xxxx:xxxx/64 scope link noprefixroute
valid_lft forever preferred_lft forever
4: sit0@NONE: <NOARP> mtu 1480 qdisc noop qlen 1000
link/sit 0.0.0.0 brd 0.0.0.0
5: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue
link/ether 02:42:4f:d8:xx:xx brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:4fff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
6: hassio: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue
link/ether 02:42:10:3f:xx:xx brd ff:ff:ff:ff:ff:ff
inet 172.30.32.1/23 brd 172.30.33.255 scope global hassio
valid_lft forever preferred_lft forever
inet6 fe80::42:10ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
8: vethaf21852@if7: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master hassio
link/ether 6e:88:df:f7:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::6c88:dfff:fef7:a65/64 scope link
valid_lft forever preferred_lft forever
10: veth06dcc8d@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master docker0
link/ether ce:5c:58:a7:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::cc5c:58ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
12: veth7fe8231@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master hassio
link/ether f6:f1:40:56:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::f4f1:40ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
14: veth190e21c@if13: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master hassio
link/ether 92:f2:66:0c:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::90f2:66ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
16: veth65cf789@if15: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master hassio
link/ether aa:a3:7e:75:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::a8a3:7eff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
18: veth86f3232@if17: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue master hassio
link/ether 52:e2:ff:d1:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::50e2:ffff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
# ip route
(nothing, hmmmm)
# nmcli con edit "Home Assistant OS default"
nmcli> print ipv4
['ipv4' setting values]
ipv4.method: manual
ipv4.dns: 192.168.1.1
ipv4.dns-search: --
ipv4.dns-options: --
ipv4.dns-priority: 0
ipv4.addresses: 192.168.1.35/24
ipv4.gateway: 192.168.1.1
ipv4.routes: --
ipv4.route-metric: -1
ipv4.route-table: 0 (unspec)
ipv4.routing-rules: --
ipv4.ignore-auto-routes: no
ipv4.ignore-auto-dns: no
ipv4.dhcp-client-id: --
ipv4.dhcp-iaid: --
ipv4.dhcp-timeout: 0 (default)
ipv4.dhcp-send-hostname: yes
ipv4.dhcp-hostname: --
ipv4.dhcp-fqdn: --
ipv4.dhcp-hostname-flags: 0x0 (none)
ipv4.never-default: no
ipv4.may-fail: yes
ipv4.dad-timeout: -1 (default)
ipv4.dhcp-vendor-class-identifier: --
ipv4.dhcp-reject-servers: --
nmcli> q
# ping 192.168.1.32
192.168.1.32 is alive!
# ping 192.168.1.1
No response from 192.168.1.1
I can ping 192.168.1.32 from an external host, but 192.168.1.35 (the VM) is dead.
I’m at a loss for what to try next… any help would be appreciated.
Cheers
I’m not really sure. It’s been a while since I did anything with this, so I might have forgotten a thing or two. Have you tried just letting your DHCP server take care of IPs? I mean getting the MAC of the VM and making a reservation for a certain IP.
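If memory serves, you can read the VM’s MAC straight from libvirt on the host:
# List the VM's virtual NICs and their MAC addresses (domain name as you created it):
virsh domiflist hassio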
Also, how do you ping from within the VM? I’ve never been able to connect to a console inside the HassOS VM; I didn’t know that was possible without e.g. setting up the SSH add-on. virsh console hassos (which is the typical way when using virsh/KVM) just leaves me hanging with:
Connected to domain hassos
Escape character is ^]
(except that’s not the keyboard combination on a Danish macOS keyboard, so that took a while to get out of…)
Why not create the bridge device on your second NIC and assign the IP via DHCP?
I tried using regular DHCP first, and my router would receive the request, but the VM would not get the IP back.
For testing from within the VM I do virsh console hassos and then hit Enter; there will be a login prompt, where I type root for the username (there is no password) to get to a shell. The escape character gets you out of the console. Depending on your OS it could be different (try Esc then ], Control-], Alt-], or GUI-] until something works). If your keyboard doesn’t have the ‘]’ key, you could close your terminal window as a workaround.
@MrksHfmn good idea. Can you point me to some instructions for how to do that? I could dedicate the second NIC purely for the VM (maybe I don’t even need the bridge?).
@hasst Passing the NIC directly into the VM should work, though I don’t know how to do it manually; I achieved it once using Cockpit and its KVM add-on.
What software do you use to connect to your router? systemd? NetworkManager? This should work pretty straightforwardly as long as you disable netfilter:
https://wiki.archlinux.org/title/Network_bridge#With_NetworkManager
https://wiki.archlinux.org/title/Systemd-networkd#Bridge_interface
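With NetworkManager the gist is something like this (untested sketch; adjust the device names to yours):
# Create the bridge, enslave the physical NIC, and let DHCP address the bridge:
nmcli con add type bridge ifname br0 con-name br0
nmcli con add type bridge-slave ifname enp3s0 master br0
nmcli con modify br0 ipv4.method auto
nmcli con up br0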
@Aephir Instead of disabling bridge filtering, there’s a more elegant way that shouldn’t break any Docker features (assuming vmbr0 is the bridge for KVM). It took me some time to figure out how to do this properly instead of disabling bridge filtering:
sudo systemctl edit docker.service
and paste the following snippet:
[Service]
ExecStartPost=/usr/sbin/iptables -I DOCKER-USER -i vmbr0 -o vmbr0 -j ACCEPT
sudo systemctl daemon-reload
sudo systemctl restart docker
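You can verify the rule landed with (assuming Docker is up):
sudo iptables -L DOCKER-USER -v -n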
@MrksHfmn I’m using systemd with netplan and edited /etc/netplan/00-installer-config.yaml as described in the first post, like so:
# This is the network config written by 'subiquity'
network:
  version: 2
  ethernets:
    enp2s0:
      dhcp6: no
      dhcp4: no
      # addresses: [192.168.1.32/24]
      # gateway4: 192.168.1.1
      # nameservers:
      #   addresses: [192.168.1.1, 8.8.8.8, 1.1.1.1]
    enp3s0:
      dhcp4: true
      optional: true
  bridges:
    br0:
      interfaces: [ enp2s0 ]
      addresses: [ 192.168.1.32/24 ]
      gateway4: 192.168.1.1
      nameservers:
        addresses: [ 192.168.1.1, 8.8.8.8, 9.9.9.9 ]
      parameters:
        stp: true
        forward-delay: 4
      dhcp4: no
      dhcp6: no
I suspect netfilter is still active as DHCP requests are making it out to my router but the VM doesn’t get the response. I double-checked those settings with:
# sysctl -a | grep brid
net.bridge.bridge-nf-call-arptables = 0
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-filter-pppoe-tagged = 0
net.bridge.bridge-nf-filter-vlan-tagged = 0
net.bridge.bridge-nf-pass-vlan-input-dev = 0
It seems like netfilter is turned off, so I don’t understand why the DHCP response isn’t making it back. The VM is on a regular LAN, same as the host, which gets traffic fine. Any ideas?
@hasst I’m not familiar with netplan, but reading this YAML I found two issues:
1. enp3s0 is your normal NIC, and enp2s0 is the NIC you would like to bridge? The device itself must not get any IP address; the bridge must! So dhcp4/dhcp6 should stay no/false on enp2s0.
2. The bridge br0 should get its IP assigned via DHCP. So you have to edit the config here, switch dhcp4 to true (or yes; I don’t know the syntax), and remove everything network-related (DNS, gateway, …); see the sketch below.
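An untested sketch of what I mean (double-check the netplan syntax; device names are from your config):
network:
  version: 2
  ethernets:
    enp2s0:
      dhcp4: no
      dhcp6: no
  bridges:
    br0:
      interfaces: [ enp2s0 ]
      dhcp4: true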
I don’t see anything particularly wrong with your settings (other than that STP is likely not needed, so I would turn it off).
Looking at your earlier posting:
I’m trying to figure it out, but a couple of remarks:
- brctl show: this seems OK. It’s showing vnet0 on br0, which should mean your VM is up and running, as it usually uses vnet0.
- ip addr: you show this before and after virt-install. It looks like after virt-install a new link was added called hassio. I was not expecting this; instead I was expecting to see vnet0 being added, with vnet0 using br0 as master. Today, does your ip addr still show a link named hassio?
Hi @Aephir, to reassure you: I have been a sysadmin for about 20 years. The knowledge for this I took from this:
and from this: iptables - Docker breaks libvirt bridge network - Server Fault
Since br_netfilter turns the KVM machine into an “L3 switch/router” by passing all bridged packets through iptables, this is necessary, as Docker by default puts a DROP policy on the FORWARD chain.
Turning off br_netfilter is like disabling the firewall for bridged interfaces, including the Docker ones; what I proposed just turns it off for the KVM interface.
AFAIK, if you don’t have Docker installed you don’t have to do either of the two, as br_netfilter is not loaded.
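A quick way to check whether it’s even loaded, and what it’s currently set to:
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables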
So I’ve been trying to get this set up for a couple of days now. I followed @Aephir’s guide (but used the latest release of Home Assistant, haos_ova-7.4.qcow2.xz), and have run into some issues.
First, my netplan had to be modified as follows so that I could create the bridge:
network:
  version: 2
  renderer: networkd
  ethernets:
    enp0s31f6:
      dhcp4: no
  bridges:
    br0:
      dhcp4: yes
      interfaces:
        - enp0s31f6
I think the trick is having dhcp4: no on the Ethernet adapter (enp0s31f6 in my case). That created the bridge, and everything seemed to work OK so far. I followed @gacopl’s instructions because I was also running some Docker containers and didn’t want to create issues. Things still seemed to be working up to this point.
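A tip for anyone editing netplan over SSH: netplan try applies the config but rolls it back automatically unless you confirm, so a broken bridge config won’t lock you out.
sudo netplan try    # auto-reverts after a timeout unless you confirm
sudo netplan apply  # persist it once it works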
Then I started the virtual machine with the command in the guide - and received this warning:
WARNING No operating system detected, VM performance may suffer. Specify an OS with --os-variant for optimal results
It did seem to run though, and I could virsh console hassos and get to the console. But nothing ever showed up in my connected devices list on my router. I then found and followed @bsr203’s instructions, and afterwards, the device showed up in the connected devices list, but I am unable to connect to the web interface. I’m not very savvy when it comes to networking, so any help is appreciated, and I do appreciate all of the help from OP, as well as the commenters!
Thanks!
I’ll defer to experience. I’ve updated the guide to include your solution; thanks for the pointer.
@stygarfield I haven’t personally tried @gacopl’s approach, so I can’t really offer much help with that. However, the “original” way I did it (disabling netfilter) never gave me any issues, and I’m running more than a dozen Docker containers. Not saying it’s the best way (since it seems it’s not); it’s just the only way I have personally tried and made work.
Regarding the --os-variant: as far as I can see, HassOS currently builds on Debian 11 (Bullseye), but for me osinfo-query os doesn’t list any Debian version higher than 9 as an option. I wasn’t sure about using a “wrong-but-close” version, so I omitted it.
Does anyone know what would be the best approach here?
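For reference, I checked the available variants along these lines:
# List the Debian variants known to the local libosinfo database:
osinfo-query os | grep -i debian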
Maybe Debian 10, as long as it is selectable? Debian 10 is available under Arch Linux.
From my understanding, HassOS uses Buildroot with a generic Linux kernel and tailors the environment to provide more of an embedded-Linux-style operating system, more like Alpine. An --os-variant of generic should work.
No joy with --os-variant=generic. I didn’t get the warning, but I’m still unable to connect via web browser.
This may be late for you, but I did a little research on networkd, as I am not familiar with it. The way my Ubuntu is set up (I don’t think I changed this part), netplan uses NetworkManager as its networking backend (instead of networkd), as networkd itself is inactive on my system. So I think you may want to change the renderer from networkd to NetworkManager.
You should confirm that your bridge interface has an address that works for you and that the Ethernet interface does not have an address. Type ip addr. See if br0 has an IP address that would work on your network, something like 192.168.blah-blah, and that enp0s31f6 has no address. If this looks fine, then just make sure you can ping the br0 IP address.
When you’re in the hassio console, if you get a ha > prompt, type login to get to a Linux root prompt (#). Type ip addr. You may get a long list of interfaces, but see if one of them has an IP address that would work on your network, something like 192.168.blah-blah. If you do have such an address, then at least you are on the right path.
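If the VM still isn’t getting a DHCP lease, you could also watch the exchange from the host while the VM boots, something like (requires tcpdump):
# DHCP runs over UDP ports 67/68; watch the bridge while the guest requests a lease:
sudo tcpdump -ni br0 'port 67 or port 68'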
Hey, I had trouble when starting my VMs: I got the errors Transport endpoint is not connected and stderr=access denied by acl file.
I found that creating a file /etc/qemu/bridge.conf with the line allow br0 was the solution.
Just sharing in case.
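For completeness, from the shell that is (the /etc/qemu directory may not exist yet):
sudo mkdir -p /etc/qemu
echo 'allow br0' | sudo tee /etc/qemu/bridge.conf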
First off, @Aephir: Thank you for the guide!
Like so many who have come before me, I’m running into a networking issue. I’ve done the whole setup: the bridge is up and I can use it to connect to the Linux host (Ubuntu 20.04), and I can virsh console hassos into the VM and see that it’s running. I set HASS to have a static IP (192.168.4.224), and what’s interesting is that I can ping that IP from the host, but I can’t ping it from any other hosts on my network. I initially tried DHCP for the guest but it didn’t work, so I’m assuming it can’t talk out to the general network at all. Unfortunately I can’t verify this, because I can’t find any way to exit out of the HA CLI: every time I exit I just get sent back to the CLI login.
A few other relevant details:
- The host’s wired NIC is enp3s0. There’s a wireless NIC in there, but I’m not using it and haven’t set it up.
- The host’s IP (on br0) is 192.168.4.119.
At any rate, below are the outputs of all the relevant commands I found in this thread. Any help would be appreciated.
# brctl show
bridge name bridge id STP enabled interfaces
br-e925ad04412b 8000.0242e1764618 no
br0 8000.ee27c0a565fa yes enp3s0
vnet0
docker0 8000.0242eaf3ed2b no
# ip a | sed 's/..:.. /xx:xx /' | sed 's/:....:....\/64 /:xxxx:xxxx\/64 /'
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:xx:xx brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp3s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master br0 state UP group default qlen 1000
link/ether 2c:f0:5d:e0:xx:xx brd ff:ff:ff:ff:ff:ff
3: wlp2s0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether fc:44:82:76:xx:xx brd ff:ff:ff:ff:ff:ff
4: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether ee:27:c0:a5:xx:xx brd ff:ff:ff:ff:ff:ff
inet 192.168.4.119/22 metric 100 brd 192.168.7.255 scope global dynamic br0
valid_lft 14260sec preferred_lft 14260sec
inet6 fde0:5b67:ae88:1:ec27:c0ff:xxxx:xxxx/64 scope global dynamic mngtmpaddr noprefixroute
valid_lft 2591860sec preferred_lft 604660sec
inet6 fe80::ec27:c0ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
5: br-e925ad04412b: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:e1:76:xx:xx brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global br-e925ad04412b
valid_lft forever preferred_lft forever
6: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:ea:f3:xx:xx brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
8: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br0 state UNKNOWN group default qlen 1000
link/ether fe:54:00:3a:xx:xx brd ff:ff:ff:ff:ff:ff
inet6 fe80::fc54:ff:xxxx:xxxx/64 scope link
valid_lft forever preferred_lft forever
# ip route
default via 192.168.4.1 dev br0 proto dhcp src 192.168.4.119 metric 100
8.8.4.4 via 192.168.4.1 dev br0 proto dhcp src 192.168.4.119 metric 100
8.8.8.8 via 192.168.4.1 dev br0 proto dhcp src 192.168.4.119 metric 100
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
172.18.0.0/16 dev br-e925ad04412b proto kernel scope link src 172.18.0.1 linkdown
192.168.4.0/22 dev br0 proto kernel scope link src 192.168.4.119 metric 100
192.168.4.1 dev br0 proto dhcp scope link src 192.168.4.119 metric 100
# virsh net-list
Name State Autostart Persistent
-----------------------------------------
br0 active yes yes
# virt-install --import --name hassos --memory 4096 --vcpus 2 --cpu host --disk haos_ova-8.1.qcow2,format=qcow2,bus=virtio --network bridge=br0,model=virtio --graphics none --noautoconsole --boot uefi --os-variant debian11