diff --git a/README.md b/README.md index 5a34fe9ed..3e965b89d 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Building Also the following Python dependencies: -``pip install sphinx sphinx_rtd_theme sphinx-prompt sphinx_substitution_extensions pyyaml``. +``pip install sphinx sphinx_rtd_theme sphinx-prompt sphinx_substitution_extensions sphinxcontrib-spelling pyyaml``. Build the documentation by running ``make html``. [More information](http://sphinx-doc.org/). diff --git a/publish/config.yaml b/publish/config.yaml index 354198fd9..ee98f5af0 100644 --- a/publish/config.yaml +++ b/publish/config.yaml @@ -1,5 +1,6 @@ mapping: - master: '6.10' + master: '6.99' + one-6.10: '6.10' one-6.8: '6.8' one-6.6: '6.6' one-6.4: '6.4' diff --git a/source/_templates/footer.html b/source/_templates/footer.html index f33a10cc1..f8db2103d 100644 --- a/source/_templates/footer.html +++ b/source/_templates/footer.html @@ -3,5 +3,5 @@ {% set show_copyright = False %} {% block extrafooter %} - Copyright 2002-2023 © OpenNebula Project (OpenNebula.io). All Rights Reserved. Please send comments to the webmaster.
Read the Legal Notice. This site is hosted by OpenNebula Systems. + Copyright 2002-2024 © OpenNebula Project (OpenNebula.io). All Rights Reserved. Please send comments to the webmaster.
Read the Legal Notice. This site is hosted by OpenNebula Systems. {% endblock %} diff --git a/source/conf.py b/source/conf.py index 7651e9608..d836313a4 100644 --- a/source/conf.py +++ b/source/conf.py @@ -88,7 +88,7 @@ # The short X.Y version. version = '6.10' # The full version, including alpha/beta/rc tags. -release = '6.10.0' +release = '6.10.2' # The context packages released version context_release = '6.10.0' diff --git a/source/ext/spellchecking/wordlists/opennebula.txt b/source/ext/spellchecking/wordlists/opennebula.txt index 148ca0ca1..93975b22d 100644 --- a/source/ext/spellchecking/wordlists/opennebula.txt +++ b/source/ext/spellchecking/wordlists/opennebula.txt @@ -11,6 +11,7 @@ Auth Authenticator Autodiscovered Autostart +backported Backported Balancer Bool @@ -27,6 +28,7 @@ Conf Config Cooldown Corosync +Cortana Crypted Ctrl Customizable @@ -91,10 +93,11 @@ Mountpoints Multicast Multicluster Multus +NSX Netplan Nokogiri -NSX Numa +NVMe Onecfg Oneflow Opennebula @@ -113,6 +116,7 @@ PublicIP Pyone Qcow Qemu +QinQ QoS Qos RSync @@ -124,6 +128,7 @@ SAML SLA SLAAC SRIOV +SSD Scalability Sched Schemas @@ -146,6 +151,7 @@ Unassigns Uncomment Unmanaged Unregister +Untagged Uplink VID VMs @@ -178,6 +184,7 @@ addhost addon addons addrule +addserver addvnet af affined @@ -257,6 +264,8 @@ conf config cooldown cpu +cpuset +cputune cryptographic css customizable @@ -282,6 +291,7 @@ decrypted deduplicate deduplication defaultquota +del deladmin delcluster deldatastore @@ -289,6 +299,7 @@ delgroup delhost delrule delvnet +delserver desc detachdisk detachnic @@ -304,6 +315,7 @@ disksnapshotcreate disksnapshotdelete disksnapshotrename disksnapshotrevert +distros dns dockerfile dockerfiles @@ -317,10 +329,13 @@ dsbl dvportgroup eagerZeroedThick ec +edk ee eebc eht +emulatorpin entrypoint +epil epilog eth ethernet @@ -412,6 +427,7 @@ kb keepalived keymap keyring +keyrings keytab kubeconfig kubernetes @@ -443,6 +459,7 @@ mem memcached microVM microVMs +migr migratelocal migrator migrators @@ -456,6 +473,7 @@ monitorization moref morefs mountpoints +msi multicast multicluster multitenant @@ -533,6 +551,7 @@ overcommitted overcommitting overprovision overriden +ovmf ovswitch parallelize param @@ -544,6 +563,7 @@ passthrough passwd passwordless pci +performant persisent persistency pluggable @@ -590,6 +610,7 @@ repos req rescan resched +resetserver restic resubmission rke @@ -627,6 +648,7 @@ securetty serveradmin serverless serveruser +server-del sftp sg sgID @@ -663,6 +685,7 @@ svncterm swapfile swapin swapout +swtpm symlink symlinked symlinks @@ -678,6 +701,7 @@ tmp toolchain toolset topologies +tpm tty tunables tx @@ -687,6 +711,7 @@ udev uid umask un +uncheck uncomment uncommented uncommenting @@ -704,6 +729,7 @@ unregister unresched unshare unsynced +untagged untar updatear updateconf diff --git a/source/images/aws_cluster_images_datastore.png b/source/images/aws_cluster_images_datastore.png index b470ea013..a58589db0 100644 Binary files a/source/images/aws_cluster_images_datastore.png and b/source/images/aws_cluster_images_datastore.png differ diff --git a/source/images/fireedge_sunstone_ssh_console.png b/source/images/fireedge_sunstone_ssh_console.png new file mode 100644 index 000000000..d2385776c Binary files /dev/null and b/source/images/fireedge_sunstone_ssh_console.png differ diff --git a/source/images/fireedge_sunstone_ssh_list.png b/source/images/fireedge_sunstone_ssh_list.png new file mode 100644 index 000000000..2cb90a998 Binary files /dev/null and 
b/source/images/fireedge_sunstone_ssh_list.png differ diff --git a/source/images/minione-aws-ubuntu24.04.png b/source/images/minione-aws-ubuntu24.04.png new file mode 100644 index 000000000..a521e3edc Binary files /dev/null and b/source/images/minione-aws-ubuntu24.04.png differ diff --git a/source/images/sunstone-aws_cluster_download_oneke.png b/source/images/sunstone-aws_cluster_download_oneke.png new file mode 100644 index 000000000..6d0a4ed17 Binary files /dev/null and b/source/images/sunstone-aws_cluster_download_oneke.png differ diff --git a/source/images/sunstone-aws_cluster_replica_host.png b/source/images/sunstone-aws_cluster_replica_host.png index 73b6b7899..756b3506f 100644 Binary files a/source/images/sunstone-aws_cluster_replica_host.png and b/source/images/sunstone-aws_cluster_replica_host.png differ diff --git a/source/images/sunstone-aws_edge_cluster_deploying.png b/source/images/sunstone-aws_edge_cluster_deploying.png new file mode 100644 index 000000000..e3b65bc6e Binary files /dev/null and b/source/images/sunstone-aws_edge_cluster_deploying.png differ diff --git a/source/images/sunstone-aws_edge_cluster_sys_ds.png b/source/images/sunstone-aws_edge_cluster_sys_ds.png new file mode 100644 index 000000000..ac3a80405 Binary files /dev/null and b/source/images/sunstone-aws_edge_cluster_sys_ds.png differ diff --git a/source/images/sunstone-aws_k8s_vms_list.png b/source/images/sunstone-aws_k8s_vms_list.png new file mode 100644 index 000000000..10bd5fb49 Binary files /dev/null and b/source/images/sunstone-aws_k8s_vms_list.png differ diff --git a/source/images/sunstone-aws_kubernetes_vnf_ip.png b/source/images/sunstone-aws_kubernetes_vnf_ip.png new file mode 100644 index 000000000..1af726b3e Binary files /dev/null and b/source/images/sunstone-aws_kubernetes_vnf_ip.png differ diff --git a/source/images/sunstone-k8s_enable_netw_params.png b/source/images/sunstone-k8s_enable_netw_params.png new file mode 100644 index 000000000..5bc600b1c Binary files /dev/null and b/source/images/sunstone-k8s_enable_netw_params.png differ diff --git a/source/images/sunstone_kubernetes_netw_dropdowns.png b/source/images/sunstone_kubernetes_netw_dropdowns.png new file mode 100644 index 000000000..9daa2d539 Binary files /dev/null and b/source/images/sunstone_kubernetes_netw_dropdowns.png differ diff --git a/source/images/sunstone_list_datatable.png b/source/images/sunstone_list_datatable.png new file mode 100644 index 000000000..367536902 Binary files /dev/null and b/source/images/sunstone_list_datatable.png differ diff --git a/source/images/sunstone_setting_list_datatable.png b/source/images/sunstone_setting_list_datatable.png new file mode 100644 index 000000000..b8d4f06d9 Binary files /dev/null and b/source/images/sunstone_setting_list_datatable.png differ diff --git a/source/images/tproxy-diagram.drawio.png b/source/images/tproxy-diagram.drawio.png new file mode 100644 index 000000000..e24d3a631 Binary files /dev/null and b/source/images/tproxy-diagram.drawio.png differ diff --git a/source/images/windows_bp_bypassnro.png b/source/images/windows_bp_bypassnro.png new file mode 100644 index 000000000..39ace1789 Binary files /dev/null and b/source/images/windows_bp_bypassnro.png differ diff --git a/source/images/windows_bp_create_image.png b/source/images/windows_bp_create_image.png new file mode 100644 index 000000000..f5c320493 Binary files /dev/null and b/source/images/windows_bp_create_image.png differ diff --git a/source/images/windows_bp_virtio_drivers.png 
b/source/images/windows_bp_virtio_drivers.png new file mode 100644 index 000000000..48648571e Binary files /dev/null and b/source/images/windows_bp_virtio_drivers.png differ diff --git a/source/images_drawio/tproxy-diagram.drawio b/source/images_drawio/tproxy-diagram.drawio new file mode 100644 index 000000000..3b76f3597 --- /dev/null +++ b/source/images_drawio/tproxy-diagram.drawio @@ -0,0 +1,164 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/source/installation_and_configuration/data_center_federation/config.rst b/source/installation_and_configuration/data_center_federation/config.rst index e568a0a8a..fb4dddcee 100644 --- a/source/installation_and_configuration/data_center_federation/config.rst +++ b/source/installation_and_configuration/data_center_federation/config.rst @@ -16,8 +16,6 @@ In this document, each configuration step starts with **Master** or **Slave** to .. important:: The federation can be set up with MySQL/MariaDB or SQLite as backends, but you can't mix them across Zones. MySQL/MariaDB is recommended for production deployments. -.. important:: FireEdge, the next-generation server for the Sunstone GUI, currently does not support switching zones in a federation environment. Please connect directly to the zone that you wish to work on. Likewise, take into account that the FireEdge functionality enabled in Sunstone will not be available if you switch to a remote zone from within Sunstone. - Step 1. Configure the OpenNebula Federation Master Zone ================================================================================ diff --git a/source/installation_and_configuration/frontend_installation/opennebula_repository_configuration.rst b/source/installation_and_configuration/frontend_installation/opennebula_repository_configuration.rst index f3aff47aa..cb59ce209 100644 --- a/source/installation_and_configuration/frontend_installation/opennebula_repository_configuration.rst +++ b/source/installation_and_configuration/frontend_installation/opennebula_repository_configuration.rst @@ -23,6 +23,14 @@ OpenNebula Systems provides an OpenNebula Enterprise Edition to customers with a AlmaLinux/RHEL -------------------------------------------------------------------------------- +In **rhel9** and **AlmaLinux9** Some dependencies cannot be found in the default repositories. Some extra repositories need to be enabled. To do this, execute the following as the root user: + +.. code-block:: bash + + repo=$(yum repolist --disabled | grep -i -e powertools -e crb | awk '{print $1}' | head -1) + yum config-manager --set-enabled $repo && yum makecache + + To add the OpenNebula enterprise repository, execute the following as user ``root``: **RHEL 8, 9** @@ -30,32 +38,32 @@ To add the OpenNebula enterprise repository, execute the following as user ``roo .. 
prompt:: bash # auto :substitutions: - # cat << "EOT" > /etc/yum.repos.d/opennebula.repo - [opennebula] - name=OpenNebula Enterprise Edition - baseurl=https://@enterprise.opennebula.io/repo/|version|/RedHat/$releasever/$basearch - enabled=1 - gpgkey=https://downloads.opennebula.io/repo/repo2.key - gpgcheck=1 - repo_gpgcheck=1 - EOT - # yum makecache + # cat << "EOT" > /etc/yum.repos.d/opennebula.repo + [opennebula] + name=OpenNebula Enterprise Edition + baseurl=https://@enterprise.opennebula.io/repo/|version|/RedHat/$releasever/$basearch + enabled=1 + gpgkey=https://downloads.opennebula.io/repo/repo2.key + gpgcheck=1 + repo_gpgcheck=1 + EOT + # yum makecache **AlmaLinux 8, 9** .. prompt:: bash # auto :substitutions: - # cat << "EOT" > /etc/yum.repos.d/opennebula.repo - [opennebula] - name=OpenNebula Enterprise Edition - baseurl=https://@enterprise.opennebula.io/repo/|version|/AlmaLinux/$releasever/$basearch - enabled=1 - gpgkey=https://downloads.opennebula.io/repo/repo2.key - gpgcheck=1 - repo_gpgcheck=1 - EOT - # yum makecache + # cat << "EOT" > /etc/yum.repos.d/opennebula.repo + [opennebula] + name=OpenNebula Enterprise Edition + baseurl=https://@enterprise.opennebula.io/repo/|version|/AlmaLinux/$releasever/$basearch + enabled=1 + gpgkey=https://downloads.opennebula.io/repo/repo2.key + gpgcheck=1 + repo_gpgcheck=1 + EOT + # yum makecache Debian/Ubuntu @@ -72,6 +80,15 @@ Debian/Ubuntu First, add the repository signing GPG key on the Front-end by executing as user ``root``: +.. note:: + + It might be needed to create /etc/apt/keyrings directory in Debian 11 because it does not exist by default: + + .. prompt:: bash # auto + + # mkdir -p /etc/apt/keyrings + + .. prompt:: bash # auto # wget -q -O- https://downloads.opennebula.io/repo/repo2.key | gpg --dearmor --yes --output /etc/apt/keyrings/opennebula.gpg @@ -138,12 +155,20 @@ The community edition of OpenNebula offers the full functionality of the Cloud M AlmaLinux/RHEL -------------------------------------------------------------------------------- +In **rhel9** and **AlmaLinux9** Some dependencies cannot be found in the default repositories. Some extra repositories need to be enabled. To do this, execute the following as the root user: + +.. code-block:: bash + + repo=$(yum repolist --disabled | grep -i -e powertools -e crb | awk '{print $1}' | head -1) + yum config-manager --set-enabled $repo && yum makecache + + To add OpenNebula repository, execute the following as user ``root``: **RHEL 8, 9** .. prompt:: bash # auto - :substitutions: + :substitutions: # cat << "EOT" > /etc/yum.repos.d/opennebula.repo [opennebula] @@ -159,7 +184,7 @@ To add OpenNebula repository, execute the following as user ``root``: **AlmaLinux 8, 9** .. prompt:: bash # auto - :substitutions: + :substitutions: # cat << "EOT" > /etc/yum.repos.d/opennebula.repo [opennebula] diff --git a/source/installation_and_configuration/ha/vm_ha.rst b/source/installation_and_configuration/ha/vm_ha.rst index e5d4b0b9d..417d177a0 100644 --- a/source/installation_and_configuration/ha/vm_ha.rst +++ b/source/installation_and_configuration/ha/vm_ha.rst @@ -56,6 +56,30 @@ More information on hooks :ref:`here `. .. warning:: Note that spurious network errors may lead to a VM being started twice on different hosts and possibly clashing on shared resources. The previous script needs to fence the error host to prevent split brain VMs. You may use any fencing mechanism for the host and invoke it within the error hook. 
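For illustration only, the snippet below sketches one way such fencing could look when the hosts expose an IPMI-capable BMC. Everything in it is an assumption rather than part of OpenNebula: the ``ipmitool`` invocation, the hypothetical ``/etc/one/fence.conf`` map of host names to BMC addresses and credentials, and the idea of calling the script from the error hook before any VM is recreated.

.. code-block:: bash

   #!/bin/bash
   # fence_host.sh <hostname> -- hypothetical fencing helper, adapt to your environment
   HOST="$1"
   # Look up the BMC address and credentials in a simple "host bmc user password" map
   read -r _ BMC USER PASS < <(grep "^${HOST} " /etc/one/fence.conf)
   # Power the host off out-of-band so it cannot keep writing to shared resources
   ipmitool -I lanplus -H "$BMC" -U "$USER" -P "$PASS" chassis power off
   # Only report success if the host is confirmed to be off
   ipmitool -I lanplus -H "$BMC" -U "$USER" -P "$PASS" chassis power status | grep -q "is off"

Aborting the recovery when a helper like this fails is what prevents the split-brain scenario described in the warning above.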
+Tuning HA responsiveness +================================================================================ + +This HA mechanism is based on host state monitoring. How long the host takes to be reported in ``ERROR`` determines how quickly the affected VMs can be recovered. + +There are multiple timers in ``/etc/one/monitord.conf`` that you can adjust to control this. ``BEACON_HOST`` dictates how often the host is checked to make sure it is responding. If it doesn't respond past ``MONITORING_INTERVAL_HOST``, the Front-end will attempt to restart the monitoring on the host. + +This process tries to connect to the host via SSH, synchronize the probes and start their execution. This SSH connection may hang if the host is unresponsive, leading to a situation where the VM workloads running on that host remain unavailable and the HA mechanism is not triggered. You can adjust how long to wait for this SSH connection to fail by setting the ``ConnectTimeout`` parameter in the oneadmin SSH configuration at ``/var/lib/one/.ssh/config``. + +The following is an example configuration: + +.. code-block:: + + Host * + ServerAliveInterval 10 + ControlMaster no + ControlPersist 70s + ControlPath /run/one/ssh-socks/ctl-M-%C.sock + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + ConnectTimeout 15 + +.. warning:: Consider that a temporary network or host problem combined with short timers can trigger the HA hook prematurely, when waiting a few more seconds would have been enough. This is a trade-off you'll have to be aware of when implementing HA. + Enabling Fencing ================================================================================ diff --git a/source/installation_and_configuration/opennebula_services/fireedge.rst index 4468cca2b..ec1f2ccbc 100644 --- a/source/installation_and_configuration/opennebula_services/fireedge.rst +++ b/source/installation_and_configuration/opennebula_services/fireedge.rst @@ -29,6 +29,9 @@ Main Features .. _fireedge_install_configuration: +.. note:: + + We are continually expanding the feature set of FireEdge Sunstone, and hence its configuration files change frequently. In versions 6.10.2 and later, the configuration files in ``/etc/one/fireedge/`` can be replaced with the ones available for download from `here `__ in order to activate the latest features. Configuration ================================================================================ @@ -154,6 +157,11 @@ The FireEdge server configuration file can be found in ``/etc/one/fireedge-serve +-------------------------------------------+-------------------------------------------+------------------------------------------------------+ | ``keep_me_logged_in`` | ``true`` | True to display 'Keep me logged in' option | +-------------------------------------------+-------------------------------------------+------------------------------------------------------+ +| ``currentTimeZone`` | | Time Zone | ++-------------------------------------------+-------------------------------------------+------------------------------------------------------+ +| ``rowStyle`` | | Changes the style of rows in datatables, values can | +| | | be ``card`` or ``list``.
| ++-------------------------------------------+-------------------------------------------+------------------------------------------------------+ Once the server is initialized, it creates the file ``/var/lib/one/.one/fireedge_key``, used to encrypt communication with Guacd. @@ -161,10 +169,6 @@ Once the server is initialized, it creates the file ``/var/lib/one/.one/fireedge In HA environments, ``fireedge_key`` needs to be copied from the first leader to the followers. Optionally, in order to have the provision logs available in all the HA nodes, ``/var/lib/one/fireedge`` need to be shared between nodes. -.. _fireedge_ssl_without_nginx: - -If you need to execute the FireEdge with SSL Certificate, in the following path: ``/usr/lib/one/fireedge`` you must create a folder called ``cert`` and inside it place the files ``cert.pem`` and ``key.pem``. After doing that you need to restart ``opennebula-fireedge``. - .. _fireedge_configuration_for_sunstone: Tuning and Extending @@ -205,6 +209,16 @@ The following example shows how you can change the logo to a generic linux one (. .. _fireedge_conf_guacamole: +Configure DataTables +-------------------------------------------------------------------------------- +You can change the style of the table rows to suit your preferences. If ``rowStyle`` is set in the ``fireedge-server.conf`` file, that value takes priority and applies the selected view to all users. + +|fireedge_sunstone_list_datatable| + +Each user can also change it from their own settings. + +|fireedge_sunstone_setting_list_datatable| + Configure Guacamole -------------------------------------------------------------------------------- @@ -282,3 +296,5 @@ If another service is using the port, you can change FireEdge configuration (``/ :width: 45% .. |fireedge_sunstone_linux_drawer_logo| image:: /images/fireedge_drawer_linux_logo.png :width: 45% +.. |fireedge_sunstone_list_datatable| image:: /images/sunstone_list_datatable.png +.. |fireedge_sunstone_setting_list_datatable| image:: /images/sunstone_setting_list_datatable.png diff --git a/source/installation_and_configuration/opennebula_services/onegate.rst index c7c97f0b2..d7b7773f4 100644 --- a/source/installation_and_configuration/opennebula_services/onegate.rst +++ b/source/installation_and_configuration/opennebula_services/onegate.rst @@ -123,229 +123,37 @@ Other logs are also available in Journald. Use the following command to show: .. |onegate_net| image:: /images/onegate_net.png -.. - Advanced Setup - ============== - - - Example: Use OneGate/Proxy to Improve Security - ---------------------------------------------- - - In addition to the OneGate itself, OpenNebula provides transparent TCP-proxy for the OneGate's network traffic. - It's been designed to drop the requirement for guest VMs to be directly connecting to the service. Up to this point, - in cloud environments like :ref:`OneProvision/AWS `, the OneGate service had to be exposed - on a public IP address. Please take a look at the example diagram below: - - .. graphviz:: - - digraph { - graph [splines=true rankdir=LR ranksep=0.7 bgcolor=transparent]; - edge [dir=both color=blue arrowsize=0.6]; - node [shape=plaintext fontsize="11em"]; - - { rank=same; - F1 [label=< - - -
- -
ONE / 1 (follower)
eth1: 192.168.150.1
- >]; - F2 [label=< - - -
- -
- -
ONE / 2 (leader)
opennebula-gate
192.168.150.86:5030
eth1:
192.168.150.2
192.168.150.86 (VIP)
- >]; - F3 [label=< - - -
- -
ONE / 3 (follower)
eth1: 192.168.150.3
- >]; - } - - { rank=same; - H1 [label=< - - -
- -
- -
- -
- -
ONE-Host / 1
opennebula-gate-proxy
169.254.16.9:5030
lo:
127.0.0.1
169.254.16.9
⇅ (forwarding)
br0: 192.168.150.4
- >]; - H2 [label=< - - -
- -
- -
- -
- -
ONE-Host / 2
opennebula-gate-proxy
169.254.16.9:5030
lo:
127.0.0.1
169.254.16.9
⇅ (forwarding)
br0: 192.168.150.5
- >]; - } - - { rank=same; - G1 [label=< - - -
- -
- -
- -
VM-Guest / 1
ONEGATE_ENDPOINT=
http://169.254.16.9:5030
static route:
169.254.16.9/32 dev eth0
eth0: 192.168.150.100
- >]; - G2 [label=< - - -
- -
- -
- -
VM-Guest / 2
ONEGATE_ENDPOINT=
http://169.254.16.9:5030
static route:
169.254.16.9/32 dev eth0
eth0: 192.168.150.101
- >]; - } - - F1:s -> F2:n [style=dotted arrowhead=none]; - F2:s -> F3:n [style=dotted arrowhead=none]; - - F2:eth1:e -> H1:br0:w; - F2:eth1:e -> H2:br0:w; - - H1:br0:e -> G1:eth0:w; - H2:br0:e -> G2:eth0:w; - } - - | - - In this altered OneGate architecture, each hypervisor Node runs a process, which listens for connections on a dedicated - `IPv4 Link-Local Address `_. - After a guest VM connects to the proxy, the proxy connects back to OneGate and transparently forwards all the protocol traffic - both ways. Because a guest VM no longer needs to be connecting directly, it's now easy to setup a VPN/TLS tunnel between - hypervisor Nodes and the OpenNebula Front-end machines. It should allow for OneGate communication to be conveyed through securely, - and without the need for exposing OneGate on a public IP address. - - Each of the OpenNebula DEB/RPM node packages: ``opennebula-node-kvm`` and ``opennebula-node-lxc`` contains the ``opennebula-gate-proxy`` systemd service. To enable and start it on your Hosts, execute as **root**: - - .. prompt:: bash # auto - - # systemctl enable opennebula-gate-proxy.service --now - - You should be able to verify, that the proxy is running with the default config: - - .. prompt:: bash # auto +Advanced Setup +============== - # ss -tlnp | grep :5030 - LISTEN 0 4096 169.254.16.9:5030 0.0.0.0:* users:(("ruby",pid=9422,fd=8)) +Example: Use Transparent OneGate Proxy to Improve Security +---------------------------------------------------------- - .. important:: +Add the following config snippet to the ``~oneadmin/remotes/etc/vnm/OpenNebulaNetwork.conf`` file on Front-end machines: - The ``:onegate_addr`` attribute is configured automatically in the ``/var/tmp/one/etc/onegate-proxy.conf`` file during - the ``onehost sync -f`` operation. That allows for an easy reconfiguration in the case of a larger (many Hosts) - OpenNebula environment. - - To change the value of the ``:onegate_addr`` attribute, edit the ``/var/lib/one/remotes/etc/onegate-proxy.conf`` - file and then execute the ``onehost sync -f`` operation as **oneadmin**: - - .. prompt:: bash $ auto - - $ gawk -i inplace -f- /var/lib/one/remotes/etc/onegate-proxy.conf <<'EOF' - BEGIN { update = ":onegate_addr: '192.168.150.86'" } - /^#*:onegate_addr:/ { $0 = update; found=1 } - { print } - END { if (!found) print update >>FILENAME } - EOF - $ onehost sync -f - ... - All hosts updated successfully. - - .. note:: - - As a consequence of the ``onehost sync -f`` operation, the proxy service will be automatically restarted - and reconfigured on every hypervisor Node. - - To change the value of the ``ONEGATE_ENDPOINT`` context attribute for each guest VM, edit the ``/etc/one/oned.conf`` file - on your Front-end machines. For the purpose of using the proxy, just specify an IP address from the ``169.254.0.0/16`` - subnet (by default it's ``169.254.16.9``) and then restart the ``opennebula`` service: - - .. prompt:: bash # auto - - # gawk -i inplace -f- /etc/one/oned.conf <<'EOF' - BEGIN { update = "ONEGATE_ENDPOINT = \"http://169.254.16.9:5030\"" } - /^#*ONEGATE_ENDPOINT[^=]*=/ { $0 = update; found=1 } - { print } - END { if (!found) print update >>FILENAME } - EOF - # systemctl restart opennebula.service - - And, last but not least, it's required from guest VMs to setup this static route: +.. code:: - .. prompt:: bash # auto + :tproxy: + # OneGate service. 
+ - :service_port: 5030 + :remote_addr: 10.11.12.13 # OpenNebula Front-end VIP + :remote_port: 5030 - # ip route replace 169.254.16.9/32 dev eth0 +Propagate config to Hypervisor hosts, execute as ``oneadmin`` on the leader Front-end machine: - Perhaps one of the easiest ways to achieve it, is to alter a VM template by adding a :ref:`start script `: +.. code:: - .. prompt:: bash # auto + $ onehost sync -f - # (export EDITOR="gawk -i inplace '$(cat)'" && onetemplate update alpine) <<'EOF' - BEGIN { update = "START_SCRIPT=\"ip route replace 169.254.16.9/32 dev eth0\"" } - /^CONTEXT[^=]*=/ { $0 = "CONTEXT=[" update "," } - { print } - EOF - # onetemplate instantiate alpine - VM ID: 0 +Deploy a guest Virtual Machine and test OneGate connectivity from within: - Finally, by examining the newly created guest VM, you can confirm if OneGate is reachable: +.. code:: - .. prompt:: bash # auto + $ onegate vm show - # grep -e ONEGATE_ENDPOINT -e START_SCRIPT /var/run/one-context/one_env - export ONEGATE_ENDPOINT="http://169.254.16.9:5030" - export START_SCRIPT="ip route replace 169.254.16.9/32 dev eth0" - # ip route show to 169.254.16.9 - 169.254.16.9 dev eth0 scope link - # onegate vm show --json - { - "VM": { - "NAME": "alpine-0", - "ID": "0", - "STATE": "3", - "LCM_STATE": "3", - "USER_TEMPLATE": { - "ARCH": "x86_64" - }, - "TEMPLATE": { - "NIC": [ - { - "IP": "192.168.150.100", - "MAC": "02:00:c0:a8:96:64", - "NAME": "NIC0", - "NETWORK": "public" - } - ], - "NIC_ALIAS": [] - } - } - } +Read more in :ref:`Transparent Proxies `. +.. Example: Deployment Behind TLS Proxy ------------------------------------ diff --git a/source/integration_and_development/infrastructure_drivers_development/sd.rst b/source/integration_and_development/infrastructure_drivers_development/sd.rst index d8734786e..0a7ca3e35 100644 --- a/source/integration_and_development/infrastructure_drivers_development/sd.rst +++ b/source/integration_and_development/infrastructure_drivers_development/sd.rst @@ -117,10 +117,11 @@ The backup datastore drivers are responsible to store the generate ``backup`` fo - ``backupjob_id`` if defined '-' otherwise - ``vm_id`` is the id of the VM - ``ds_id`` is the target datastore (the system datastore). - - **STDIN**: ``datastore_action_dump`` See a decoded :ref:`example `. - - **RETURNS**: ``backup_id size_mb`` - - ``backup_id`` driver reference for the backup. + - **STDIN**: ``datastore_backup_dump`` See an :ref:`example `. + - **RETURNS**: ``backup_id size_mb format`` + - ``backup_id`` driver reference for the backup - ``size_mb`` size that the backup takes + - ``format`` value of the backup image's FORMAT attribute (values: `raw`, `rbd`) - **restore**: Restore the OpenNebula objects (VM Template and Images). Note that the actual download of the images will be made by the Image Datastore using the reference uri. The specific mechanism for download images of a given protocol are coded in the ``downloader.sh`` script. The pseudo-URL takes the form: ``://///`` (example: ``restic://100/23/0:25f4b298,1:6968545c//var/lib/one/datastores/0/0/backup/disk.0``, the backup job id can be empty): @@ -662,6 +663,228 @@ Decoded Example 1 +Datastore Backup STDIN Example +================================================================================ + +.. _ds_backup_dump: + +.. 
code-block:: xml + + + + 100 + 0 + 0 + oneadmin + oneadmin + rsync + + 1 + 1 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + + rsync + - + /var/lib/one//datastores/100 + 3 + 0 + 0 + + 0 + + 19663 + 6457 + 13191 + + + + + 800 + 0 + 0 + oneadmin + oneadmin + alpine-800 + + 1 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 3 + 69 + 3 + 69 + 0 + 1727952499 + 0 + 7c657ee7-166b-46d3-bf5f-53886f0b77dd + + + + + x86_64 + + + + 800 + 1 + ubuntu2204-kvm-ceph-quincy-6-99-0c08-2.test + 62 + 0 + 1727952516 + 0 + kvm + ceph + 0 + 0 + 0 + 1727952516 + 0 + 0 + 0 + 0 + -1 + -1 + -1 + + + + + 100 + + + + + + + Export XML ================================================================================ diff --git a/source/integration_and_development/references/build_deps.rst b/source/integration_and_development/references/build_deps.rst index a7ef50e75..79c0b361b 100644 --- a/source/integration_and_development/references/build_deps.rst +++ b/source/integration_and_development/references/build_deps.rst @@ -16,7 +16,7 @@ This page lists the **build** dependencies for OpenNebula. * **openssl** development libraries (>= 0.9.8) * **ruby** interpreter (>= 2.0.0) -Ubuntu 20.04, 22.04 +Ubuntu 22.04, 24.04 ================================================================================ * **bash-completion** @@ -63,7 +63,7 @@ Install all requirements using:: apt install bash-completion debhelper default-jdk freerdp2-dev grunt javahelper libaugeas-dev libcairo2-dev libcurl4-openssl-dev libmysql++-dev libmysqlclient-dev libnode-dev libossp-uuid-dev libpango1.0-dev libpulse-dev libsqlite3-dev libssh2-1-dev libssl-dev libsystemd-dev libtool libvncserver-dev libvorbis-dev libwebp-dev libws-commons-util-java libxml2-dev libxmlrpc-c++8-dev libxslt1-dev libzmq3-dev libzmq5 nodejs npm python3 python3-pip python3-setuptools rake ruby-dev scons unzip && npm install -g bower -Debian 11 +Debian 11, Debian 12 ================================================================================ * **bash-completion** @@ -108,54 +108,6 @@ Install all requirements using:: apt install bash-completion debhelper default-jdk default-libmysqlclient-dev freerdp2-dev grunt javahelper libaugeas-dev libcairo2-dev libcurl4-openssl-dev libnode-dev libossp-uuid-dev libpango1.0-dev libpulse-dev libsqlite3-dev libssh2-1-dev libssl-dev libsystemd-dev libtool libvncserver-dev libvorbis-dev libwebp-dev libws-commons-util-java libxml2-dev libxmlrpc-c++8-dev libxslt1-dev libzmq3-dev libzmq5 nodejs npm python3 python3-setuptools rake ruby-dev scons unzip && npm install -g bower -Debian 10 -================================================================================ - -* **bash-completion** -* **bower** -* **debhelper (>= 7.0.50~)** -* **default-jdk** -* **default-libmysqlclient-dev** -* **freerdp2-dev** -* **grunt** -* **javahelper (>= 0.32)** -* **libaugeas-dev** -* **libcairo2-dev** -* **libcurl4-openssl-dev** -* **libnode-dev (>= 10)** -* **libossp-uuid-dev** -* **libpango1.0-dev** -* **libpulse-dev** -* **libsqlite3-dev** -* **libssh2-1-dev** -* **libssl-dev** -* **libsystemd-dev** -* **libtool** -* **libvncserver-dev** -* **libvorbis-dev** -* **libwebp-dev** -* **libws-commons-util-java** -* **libxml2-dev** -* **libxmlrpc-c++8-dev** -* **libxmlrpc3-client-java** -* **libxmlrpc3-common-java** -* **libxslt1-dev** -* **libzmq3-dev** -* **libzmq5** -* **nodejs (>= 10)** -* **npm** -* **python3** -* **python3-pip** -* **python3-setuptools** -* **rake** -* **ruby-dev** -* **scons** -* **unzip** - -Install all requirements using:: - - apt install bash-completion debhelper default-jdk 
default-libmysqlclient-dev freerdp2-dev grunt javahelper libaugeas-dev libcairo2-dev libcurl4-openssl-dev libnode-dev libossp-uuid-dev libpango1.0-dev libpulse-dev libsqlite3-dev libssh2-1-dev libssl-dev libsystemd-dev libtool libvncserver-dev libvorbis-dev libwebp-dev libws-commons-util-java libxml2-dev libxmlrpc-c++8-dev libxmlrpc3-client-java libxmlrpc3-common-java libxslt1-dev libzmq3-dev libzmq5 nodejs npm python3 python3-pip python3-setuptools rake ruby-dev unzip && npm install -g bower && pip3 install scons - AlmaLinux/RHEL 8,9 ================================================================================ diff --git a/source/integration_and_development/references/compile.rst b/source/integration_and_development/references/compile.rst index bf5bd1eed..f2627ecd0 100644 --- a/source/integration_and_development/references/compile.rst +++ b/source/integration_and_development/references/compile.rst @@ -160,8 +160,8 @@ The packages do a ``system-wide`` installation. To create a similar environment, In that case one needs to patch ``src/scheduler/src/sched/SConstruct`` file: .. prompt:: bash # auto - - # diff one/src/scheduler/src/sched/SConstruct one-orig/src/scheduler/src/sched/SConstruct + + # diff one/src/scheduler/src/sched/SConstruct one-orig/src/scheduler/src/sched/SConstruct 48c48,49 < 'xml2' --- @@ -186,13 +186,10 @@ Build Dependencies: Run Dependencies: -- **aenum**: python OCA support - **dict2xml**: python OCA support -- **feature**: python OCA support - **lxml**: python OCA support -- **six**: python OCA support -- **tblib**: python OCA support - **xml2dict**: python OCA support +- **requests**: python OCA support To build run following: diff --git a/source/integration_and_development/system_interfaces/api.rst b/source/integration_and_development/system_interfaces/api.rst index 6aae19d15..7fd7ccfb0 100644 --- a/source/integration_and_development/system_interfaces/api.rst +++ b/source/integration_and_development/system_interfaces/api.rst @@ -763,27 +763,33 @@ onevrouter onezone -------------------------------------------------------------------------------- -+-----------------+-------------------+---------------+ -| onezone command | XML-RPC Method | Auth. Request | -+=================+===================+===============+ -| create | one.zone.allocate | ZONE:CREATE | -+-----------------+-------------------+---------------+ -| rename | one.zone.rename | ZONE:MANAGE | -+-----------------+-------------------+---------------+ -| update | one.zone.update | ZONE:MANAGE | -+-----------------+-------------------+---------------+ -| delete | one.zone.delete | ZONE:ADMIN | -+-----------------+-------------------+---------------+ -| enable | one.zone.enable | ZONE:ADMIN | -| | | | -| disable | | | -+-----------------+-------------------+---------------+ -| show | one.zone.info | ZONE:USE | -+-----------------+-------------------+---------------+ -| list | one.zonepool.info | ZONE:USE | -+-----------------+-------------------+---------------+ -| set | -- | ZONE:USE | -+-----------------+-------------------+---------------+ ++-----------------+----------------------+---------------+ +| onezone command | XML-RPC Method | Auth. 
Request | ++=================+======================+===============+ +| create | one.zone.allocate | ZONE:CREATE | ++-----------------+----------------------+---------------+ +| rename | one.zone.rename | ZONE:MANAGE | ++-----------------+----------------------+---------------+ +| update | one.zone.update | ZONE:MANAGE | ++-----------------+----------------------+---------------+ +| delete | one.zone.delete | ZONE:ADMIN | ++-----------------+----------------------+---------------+ +| enable | one.zone.enable | ZONE:ADMIN | +| | | | +| disable | | | ++-----------------+----------------------+---------------+ +| server-add | one.zone.addserver | ZONE:ADMIN | ++-----------------+----------------------+---------------+ +| server-del | one.zone.delserver | ZONE:ADMIN | ++-----------------+----------------------+---------------+ +| server-reset | one.zone.resetserver | ZONE:ADMIN | ++-----------------+----------------------+---------------+ +| show | one.zone.info | ZONE:USE | ++-----------------+----------------------+---------------+ +| list | one.zonepool.info | ZONE:USE | ++-----------------+----------------------+---------------+ +| set | -- | ZONE:USE | ++-----------------+----------------------+---------------+ onesecgroup -------------------------------------------------------------------------------- @@ -985,7 +991,7 @@ onehook -------------------------------------------------------------------------------- +-----------------------+----------------------------+---------------------------+ -| onevntemplate command | XML-RPC Method | Auth. Request | +| onehook command | XML-RPC Method | Auth. Request | +=======================+============================+===========================+ | update | one.hook.update | HOOK:MANAGE | +-----------------------+----------------------------+---------------------------+ @@ -1005,7 +1011,7 @@ onehook +-----------------------+----------------------------+---------------------------+ | unlock | one.hook.unlock | HOOK:MANAGE | +-----------------------+----------------------------+---------------------------+ -| retry | one.hook.unlock | HOOK:MANAGE | +| retry | one.hook.retry | HOOK:MANAGE | +-----------------------+----------------------------+---------------------------+ | log | one.hooklog.info | HOOK:- | +-----------------------+----------------------------+---------------------------+ @@ -7196,6 +7202,60 @@ one.zone.rename | OUT | Int | ID of the object that caused the error. | +------+------------+---------------------------------------------+ +one.zone.addserver +------------------ + +- **Description**: Add server to zone. +- **Parameters** + +==== ========== ============================================ +Type Data Type Description +==== ========== ============================================ +IN String The session string. +IN Int The object ID. +IN String A string containing the template of the server. Syntax can be the usual ``attribute=value`` or XML. | +OUT Boolean True or false whenever is successful or not. +OUT Int/String The resource ID / The error string. +OUT Int Error code. +OUT Int ID of the object that caused the error. +==== ========== ============================================ + +one.zone.delserver +------------------ + +- **Description**: Delete a server from zone. +- **Parameters** + +==== ========== ============================================ +Type Data Type Description +==== ========== ============================================ +IN String The session string. +IN Int The object ID. +IN Int The server ID. 
+OUT Boolean true or false whenever is successful or not +OUT Int/String The resource ID / The error string. +OUT Int Error code. +OUT Int ID of the object that caused the error. +==== ========== ============================================ + +one.zone.resetserver +-------------------- + +- **Description**: Reset follower log index. This should be trigger when a follower DB has been reset. +- **Parameters** + +==== ========== ============================================ +Type Data Type Description +==== ========== ============================================ +IN String The session string. +IN Int The object ID. +IN Int The server ID. +OUT Boolean true or false whenever is successful or not +OUT Int/String The resource ID / The error string. +OUT Int Error code. +OUT Int ID of the object that caused the error. +==== ========== ============================================ + one.zone.info -------------- diff --git a/source/intro_release_notes/release_notes/acknowledgements.rst b/source/intro_release_notes/release_notes/acknowledgements.rst index c828f7b4e..f7b83dc7a 100644 --- a/source/intro_release_notes/release_notes/acknowledgements.rst +++ b/source/intro_release_notes/release_notes/acknowledgements.rst @@ -6,8 +6,8 @@ Acknowledgements The OpenNebula project would like to thank the `community members `__ and `users `__ who have contributed to this software release by being active in discussions, answering user questions, or providing patches for bugfixes, features, and documentation. -Part of the new functionality in OpenNebula 6.10 has been funded by the following innovation projects: +Some of the new functionality in OpenNebula 6.10 has been made possible through funding from the following innovation projects: - * `SovereignEdge.Cognit `__ (Grant Agreement 101092711), through the European Union’s Horizon Europe Research and Innovation Programme. - * `OneEdge5G `__ (Grant Agreement TSI-064200-2023-1), supported by the Spanish Ministry for Digital Transformation and Civil Service through the UNICO I+D 6G Program, co-funded by the European Union – NextGenerationEU through the Recovery and Resilience Facility (RRF). - * `OneNextGen `__ (Grant Agreement UNICO IPCEI-2023-003), supported by the Spanish Ministry for Digital Transformation and Civil Service through the UNICO IPCEI Program, co-funded by the European Union – NextGenerationEU through the Recovery and Resilience Facility (RRF). + * `SovereignEdge.Cognit `__, funded by the European Union’s Horizon Europe research and innovation programme through the SovereignEdge.Cognit project: A Cognitive Serverless Framework for the Cloud-Edge Continuum (Grant Agreement 101092711 - SovereignEdge.Cognit [2023-2025]). + * `ONEedge5G `__, funded by the Spanish Ministry for Digital Transformation and Civil Service through the ONEedge5G Project: Intelligence and Automation for the Operation of Distributed Edge Systems on 5G Advanced Infrastructures (TSI-064200-2023-1) and co-funded by the European Union’s NextGenerationEU Instrument through the Recovery and Resilience Facility (RRF). + * `ONEnextgen `__, funded by the Spanish Ministry for Digital Transformation and Civil Service through the ONEnextgen Project: Next-Generation European Platform for the Datacenter-Cloud-Edge Continuum (UNICO IPCEI-2023-003) and co-funded by the European Union’s NextGenerationEU instrument through the Recovery and Resilience Facility (RRF). 
diff --git a/source/intro_release_notes/release_notes/compatibility.rst index 5b92b5402..db3cd7ae9 100644 --- a/source/intro_release_notes/release_notes/compatibility.rst +++ b/source/intro_release_notes/release_notes/compatibility.rst @@ -39,4 +39,8 @@ Labels on Sunstone ================================================================================ Only :ref:`persistent user labels ` that were created in old Sunstone will be showed :ref:`in new Sunstone `. We are working to offer you a better experience in new Sunstone with the system and user labels in future versions of OpenNebula. -Remember that in new Sunstone you need to create the user label in the Settings section before apply a label to a resource. See :ref:`Sunstone labels guide ` to get more information. \ No newline at end of file +Remember that in new Sunstone you need to create the user label in the Settings section before applying a label to a resource. See :ref:`Sunstone labels guide ` to get more information. + +VMRC support on Sunstone +================================================================================ +New Sunstone removes support for VMRC (VMware Remote Console), so a user cannot connect to a virtual machine using VMRC. \ No newline at end of file diff --git a/source/intro_release_notes/release_notes/known_issues.rst index c5c8aad74..2efe9f597 100644 --- a/source/intro_release_notes/release_notes/known_issues.rst +++ b/source/intro_release_notes/release_notes/known_issues.rst @@ -23,6 +23,12 @@ Sunstone - Guacamole RDP as is currently shipped in OpenNebula does not support NLA authentication. You can follow `these instructions `__ in order to disable NLA in the Windows box to use Guacamole RDP within Sunstone. +- `'Groupadmin' view fails to load for groups with multiple admin users `__. + +- The current configuration for both Sunstone and One-Provision contains a mismatch in the `keep_me_logged` configuration option. This issue has been fixed, but the new configuration files need to be downloaded in order for the fix to take effect; refer to :ref:`the following section ` for instructions on how to do this. + +- The `Update VM Configuration` dialog contains a bug which results in a blank screen when updating the configuration of a virtual machine with fewer than two disks attached. The quickest workaround is to attach another minimal disk to the VM, as shown in the example below.
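As an illustration of that workaround, the commands below attach a small volatile disk from the CLI and remove it afterwards. This is only a sketch: the VM ID (``42``), the 1 GB size, the resulting disk ID, and the use of ``onevm disk-attach --file`` with a disk template are assumptions to adapt to your setup.

.. code-block:: bash

   # Hypothetical example: attach a minimal 1 GB volatile disk to VM 42
   cat > /tmp/minimal_disk.tmpl <<'EOF'
   DISK = [
     TYPE   = fs,
     SIZE   = 1024,
     FORMAT = qcow2
   ]
   EOF
   onevm disk-attach 42 --file /tmp/minimal_disk.tmpl

   # Once the configuration update succeeds, detach the helper disk again
   # (replace 2 with the DISK_ID reported by "onevm show 42")
   onevm disk-detach 42 2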
+ Install Linux Graphical Desktop on KVM Virtual Machines ================================================================================ diff --git a/source/intro_release_notes/release_notes/platform_notes.rst b/source/intro_release_notes/release_notes/platform_notes.rst index bc77cfebb..976ae7631 100644 --- a/source/intro_release_notes/release_notes/platform_notes.rst +++ b/source/intro_release_notes/release_notes/platform_notes.rst @@ -43,7 +43,8 @@ vCenter Nodes +-----------+---------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+ | Component | Version | More information | +===========+=======================================+========================================================================================================================================+ -| vCenter | 7.0.x, managing ESX 7.0.x | :ref:`vCenter Node Installation ` | +| vCenter | 7.0.x managing ESX 7.0.x & | :ref:`vCenter Node Installation ` | +| | 8.0.x managing ESX 8.0.x | | +-----------+---------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+ | NSX-T | 2.4.1+ | `VMware compatiblity `__. :ref:`NSX Documentation `. | +-----------+---------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+ @@ -116,8 +117,8 @@ Open Cloud Storage Infrastructure +-----------+--------------------------------------------+-------------------------------------+ | LVM2 | Version included in the Linux distribution | :ref:`LVM Drivers ` | +-----------+--------------------------------------------+-------------------------------------+ -| Ceph | Pacific v16.2.x | :ref:`The Ceph Datastore ` | -| | Quincy v17.2.x | | +| Ceph | Quincy v17.2.x | :ref:`The Ceph Datastore ` | +| | Reef v18.2.x | | +-----------+--------------------------------------------+-------------------------------------+ Authentication diff --git a/source/intro_release_notes/release_notes/whats_new.rst b/source/intro_release_notes/release_notes/whats_new.rst index 46cef5547..4da7642e4 100644 --- a/source/intro_release_notes/release_notes/whats_new.rst +++ b/source/intro_release_notes/release_notes/whats_new.rst @@ -156,3 +156,4 @@ Also, the following issues have been solved in the FireEdge Sunstone Web UI: - `Fix detailed view stuck in fullscreen `__. - `Fix unnecesary extra step when creating Image `__. - `Fix simplified view of the table `__. +- `Fix display VM hostname in table `__. diff --git a/source/intro_release_notes/release_notes_enterprise/index.rst b/source/intro_release_notes/release_notes_enterprise/index.rst index d1d6fbec6..daea1797f 100644 --- a/source/intro_release_notes/release_notes_enterprise/index.rst +++ b/source/intro_release_notes/release_notes_enterprise/index.rst @@ -8,3 +8,5 @@ Release Notes |version| Enterprise Edition :maxdepth: 1 What is OpenNebula EE + Resolved Issues 6.10.1 + Resolved Issues 6.10.2 diff --git a/source/intro_release_notes/release_notes_enterprise/resolved_issues_6101.rst b/source/intro_release_notes/release_notes_enterprise/resolved_issues_6101.rst new file mode 100644 index 000000000..943c87f6d --- /dev/null +++ b/source/intro_release_notes/release_notes_enterprise/resolved_issues_6101.rst @@ -0,0 +1,42 @@ +.. 
_resolved_issues_6101: + +Resolved Issues in 6.10.1 +-------------------------------------------------------------------------------- + +A complete list of solved issues for 6.10.1 can be found in the `project development portal `__. + +The following new features have been backported to 6.10.1: + +- Backup datastore capacity is checked before attempting to create a backup. This test can be disable with the ``DATASTORE_CAPACITY_CHECK`` attribute, either globally or per datastore. +- Add a "disk-snapshot-list" option to :ref:`onevm cli `. +- `Optimize handling of VM history records, it greatly improves perfomance of all VM operations for VMs with many history records `__. +- `Add support for incremental backups in Ceph `__. +- :ref:`New Transparent Proxies for VMs to simplify access to external services (e.g. OneGate) ` + +The following issues has been solved in 6.10.1: + +- `Fix KVM VM migration when CLEANUP_MEMORY_STOP is not defined in the driver configuration `__. +- `Fix a very uncommon error while initializing drivers `__. +- `Fix restore of volatile disks from a VM backup `__. +- `Fix backups of volatile disks in Ceph drivers `__. +- `Fix backup of VM with ISO images to skip the backup of CD drives `__. +- `Fix Sunstone/OneProvision configuration mismatch `__. +- `Fix Sunstone check button for backing up volatile disks `__. +- `Fix the reloading process of the monitor drivers `__. +- `Fix oned initialization when the configuration file contains drivers with the same name `__. +- `Fix PyOne dependencies to not mix pip and python3-* packages `__. +- `Fix inconsistent CPU pinning after VM cold migration `__. +- `Fix User inputs doesn't propagate value to context attribute of vm using Sunstone `__. +- `Fix VM disk selection when restoring backups `__. +- `Fix Sunstone card view for large installations adding a new row mode style `__. +- `Fix Sunstone datastore limit parsing `__. +- `Fix reset backup failed after restore individual disk on the VM `__. +- `Fix reconfiguration process for VMs using context disk of block type `__. +- `Fix Ceph VM restore ignoring EC_POOL_NAME `__. +- `Fix QEMU Guest Agent VM Monitoring `__. +- `Fix host name not validated `__. +- `Fix several CLI parameters, which converts resource name always to ID 0 `__. +- `Fix Groupadmin view not accessible with multiple admins `__. +- `Fix Restic backup is still labeled EE only in sunstone `__. +- `Fix host requirements expression regex `__. +- `Fix FireEdge Sunstone cannot set deployment mode `__. \ No newline at end of file diff --git a/source/intro_release_notes/release_notes_enterprise/resolved_issues_6102.rst b/source/intro_release_notes/release_notes_enterprise/resolved_issues_6102.rst new file mode 100644 index 000000000..a826b47e7 --- /dev/null +++ b/source/intro_release_notes/release_notes_enterprise/resolved_issues_6102.rst @@ -0,0 +1,33 @@ +.. _resolved_issues_6102: + +Resolved Issues in 6.10.2 +-------------------------------------------------------------------------------- + +A complete list of solved issues for 6.10.2 can be found in the `project development portal `__. + +The following new features have been backported to 6.10.2: + +- `Add support for VLAN filtering to the Linux bridge drivers `__. This allows to limit the VLANs in trunk mode, as well as in QinQ mode. For more information check the :ref:`bridge driver ` and the :ref:`802.1Q VLAN driver ` documentation guides. 
+ +The following issues have been solved in 6.10.2: + +- `Fix bug in the DS Ceph driver: set the value for the --keyfile to CEPH_KEY instead of CEPH_USER in the export operation `__. +- `Fix GOCA OS vector attribute to include FIRMWARE, FIRMWARE_SECURE, UUID and SD_DISK_BUS `__. +- `Fix PyOne installation through pip `__. +- `Fix the list of attributes that can be overridden in vmm_exec_kvm.conf `__. +- `Fix a rare crash in 'onedb fsck' caused by a locked MarketPlaceApp in a federated environment `__. +- `Fix iotune attributes not being passed to VM if value is a big number `__. +- `Fix SecurityGroup rule validation logic to include additional checks for port ranges `__. +- `Fix KVM domain definition to set up CPU affinity to the auto-selected NUMA node when using huge pages without CPU pinning `__. +- `Fix multiple problems with QEMU Guest Agent monitoring `__. Additional monitor commands for the qemu-agent probe are `shown here `__. You can add them to your existing 6.10 configuration files. +- `Fix Checkpoint file is not always cleaned up on VM Action `__. +- `Fix Set NEXT_SNAPSHOT=1 for persistent images `__. +- `Fix Restored disks of VM additional disks does not show the real size of the original disk `__. + +The following issues have been solved in the Sunstone Web UI: + +- `Fix DEV_PREFIX wrong when using Sunstone `__. +- `Fix Sunstone host graph not showing information `__. +- `Fix number of instances ignored in service instantiation `__. +- `Fix Sunstone filter VMs on "Locked" gives empty white page `__. +- `Fix missing boot order selector `__. \ No newline at end of file diff --git a/source/intro_release_notes/upgrades/upgrading_single.rst index 8a931230b..b74915879 100644 --- a/source/intro_release_notes/upgrades/upgrading_single.rst +++ b/source/intro_release_notes/upgrades/upgrading_single.rst @@ -4,12 +4,97 @@ Upgrading Single Front-end Deployments ================================================================================ +If you are upgrading from a 6.10.x installation, you only need to follow a reduced set of steps. If you are running a 6.8.x version or older, please check :ref:`this set of steps ` (some additional steps may apply; please review them at the end of the section). + .. important:: - Users of the Community Edition of OpenNebula can upgrade from the previous stable version if they are running a non-commercial OpenNebula cloud. In order to access the migrator package a request needs to be made through this `online form `__. In order to use these non-commercial migrators to upgrade to the latest CE release (OpenNebula 6.8.0), you will need to upgrade your existing OpenNebula environment first to CE Patch Release 6.6.0.1 + Users of the Community Edition of OpenNebula can upgrade from the previous stable version if they are running a non-commercial OpenNebula cloud. In order to access the migrator package a request needs to be made through this `online form `__. In order to use these non-commercial migrators to upgrade to the latest CE release (OpenNebula 6.8.0), you will need to upgrade your existing OpenNebula environment first to CE Patch Release 6.8.0.1 .. important:: If you haven't done so, please enable the :ref:`OpenNebula and needed 3rd party repositories ` before attempting the upgrade process. +.. _upgrade_610: + +Upgrading from 6.10.x +^^^^^^^^^^^^^^^^^^^^^ + +This section describes the installation procedure for systems that are already running a 6.10.x OpenNebula.
The upgrade to OpenNebula |version| can be done directly following this section, you don't need to perform intermediate version upgrades. The upgrade will preserve all current users, hosts, resources and configurations. + +When performing a minor upgrade OpenNebula adheres to the following convention to ease the process: + + * No changes are made to the configuration files, so no configuration file will be changed during the upgrade. + * Database versions are preserved, so no upgrade of the database schema is needed. + +When a critical bug requires an exception to the previous rules it will be explicitly noted in this guide. + +Step 1. Stop OpenNebula Services +================================ + +Before proceeding, make sure you don't have any VMs in a transient state (prolog, migr, epil, save). Wait until these VMs get to a final state (run, suspended, stopped, done). Check the :ref:`Managing Virtual Machines guide ` for more information on the VM life-cycle. + +Now you are ready to stop OpenNebula and any other related services you may have running, e.g. Sunstone or OneFlow. It's preferable to use the system tools, like `systemctl` or `service` as `root` in order to stop the services. + +Step 2. Upgrade Front-end to the New Version +============================================ + +Upgrade the OpenNebula software using the package manager of your OS. Refer to the :ref:`Single Front-end Installation guide ` for a complete list of the OpenNebula packages installed on your system. Package repos need to be pointing to the latest version (|version|). + +For example, in CentOS/RHEL simply execute: + +.. prompt:: text # auto + + # yum upgrade opennebula + +For Debian/Ubuntu use: + +.. prompt:: text # auto + + # apt-get update + # apt-get install --only-upgrade opennebula + +Step 3. Upgrade Hypervisors to the New Version +============================================== + +You can skip this section for vCenter Hosts. + +Upgrade the OpenNebula node KVM or LXD packages, using the package manager of your OS. + +For example, in a rpm-based Linux distribution simply execute: + +.. prompt:: text # auto + + # yum upgrade opennebula-node-kvm + +For deb-based distros use: + +.. prompt:: text # auto + + # apt-get update + # apt-get install --only-upgrade opennebula-node-kvm + +.. note:: If you are using LXD the package is ``opennebula-node-lxd``. + +Step 4. Update the Drivers +========================== + +You should now be able to start OpenNebula as usual, running ``service opennebula start`` as ``root``. At this point, as ``oneadmin`` user, execute ``onehost sync`` to update the new drivers in the Hosts. + +.. note:: You can skip this step if you are not using KVM Hosts, or any Hosts that use remote monitoring probes. + +Testing +======= + +OpenNebula will continue the monitoring and management of your previous Hosts and VMs. + +As a measure of caution, look for any error messages in oned.log, and check that all drivers are loaded successfully. After that, keep an eye on oned.log while you issue the onevm, onevnet, oneimage, oneuser, onehost **list** commands. Try also using the **show** subcommand for some resources. + +Restoring the Previous Version +============================== + +If for any reason you need to restore your previous OpenNebula, simply uninstall OpenNebula |version|, and install again your previous version. After that, update the drivers if needed, as outlined in the Step 12 below. + +.. 
_upgrading_from_previous_extended_steps: + + Upgrading from 6.x and higher ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -81,20 +166,32 @@ RHEL Community Edition -------------------------------------------------------------------------------- -There is an additional step if you are upgrading OpenNebula CE. After you get the `opennebula-migration-community package `__, you need to install it in the OpenNebula Front-end. +If upgrading OpenNebula CE, you will need to install the ``opennebula-migration-community`` package on your Front-end. -RHEL +If you are upgrading to the *latest* version, you will need to download the package from the `Get Migration Packages `__ page. + +If you are upgrading to any prior version (such as upgrading from 6.8 to 6.10), then the migration package is already included in the OpenNebula repositories. + +To install the migration package: + +On RHEL: .. prompt:: bash $ auto $ rpm -i opennebula-migration-community*.rpm -Debian/Ubuntu +On Debian/Ubuntu: .. prompt:: bash $ auto $ dpkg -i opennebula-migration-community*.deb +.. note:: + + Before downloading the migration package, it's a good idea to double-check the URL in your software repository file. Ensure that the URL includes the software major and minor version (in ``.`` format), but not the exact release. + + For example, for OpenNebula version 6.10, the file should point to ``https://downloads.opennebula.io/repo/6.10`` and not ``https://downloads.opennebula.io/repo/6.10.0``. The first case will include migration packages for 6.10.*, whereas the second case will exclude minor versions such as 6.10.0.1. + Step 7. Update Configuration Files ================================================================================ diff --git a/source/management_and_operations/backups/operations.rst b/source/management_and_operations/backups/operations.rst index 5c7f5edd3..eb7354094 100644 --- a/source/management_and_operations/backups/operations.rst +++ b/source/management_and_operations/backups/operations.rst @@ -22,11 +22,16 @@ OpenNebula supports two backup types: - **Full**, each backup contains a full copy of the VM disks. Libvirt version >= 5.5 is required. - **Incremental**, each backup contains only the changes since the last backup. Incremental backups track changes by creating checkpoints (disk block dirty-bitmaps) using QEMU/Libvirt. Libvirt version >= 7.7 is required. -Incremental backups can use two different modes: +Incremental backups of **qcow2** disks can use two different modes via the ``INCREMENT_MODE`` user setting: - **CBT** (Changed Block Tracking). For each increment OpenNebula creates a block bitmap in the disk image to track which blocks have changed since the last backup. - **SNAPSHOT**. OpenNebula tracks changes by creating a separate disk snapshot. This snapshot stores all disk changes since the last backup. +Also, for **RBD** disks (Ceph), FULL and INCREMENT backups are currently stored in a different way, although the difference should be transparent to the user: + +- **Full** backups (``FORMAT=raw``) store the RBD export converted to a qcow2 file. The restore process involves converting it to a RAW file and importing it to the Ceph pool. +- **Incremental** backups (``FORMAT=rbd``) store the initial RBD export, as well as zero or more increment files, in the native format of Ceph exports (`rbd export --export-format 2` / `rbd export-diff`). The restore process involves importing the initial export and applying the diff files in the same order, one by one. 
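+For illustration only, the incremental chain described above is based on the standard RBD export/import tooling; a simplified sketch of the kind of commands involved could look like this (pool, image, snapshot, and file names are purely illustrative):
+
+.. code::
+
+    # initial full export, keeping snapshot information (export-format 2)
+    rbd export --export-format 2 one/one-42-disk-0@backup0 disk.0.rbd2
+
+    # subsequent increments: only the changes since the previous backup snapshot
+    rbd export-diff --from-snap backup0 one/one-42-disk-0@backup1 disk.0.diff1
+
+    # restore: import the initial export, then apply each diff in order
+    rbd import --export-format 2 disk.0.rbd2 one/one-42-disk-0-restored
+    rbd import-diff disk.0.diff1 one/one-42-disk-0-restored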
+ The Backup Process -------------------------------------------------------------------------------- VM backups can be taken live or while the VM is powered-off, the operation comprises three steps: @@ -35,11 +40,11 @@ VM backups can be taken live or while the VM is powered-off, the operation compr - *Backup*: Full disk copies (or increments) are uploaded to the backup server. In this step, OpenNebula will use the specific datastore drivers for the backup system. - *Post-backup*: Cleans any temporal file in the hypervisor. -.. note:: In order to save space in the backup system, disk backups are stored always in Qcow2 format. +.. note:: In order to save space in the backup system, RAW disk backups are converted and stored always in Qcow2 format. Limitations ============ -- Incremental backups are only available for KVM and qcow2 disks +- Incremental backups are only available for KVM and qcow2/RBD disks - Live backups are only supported for KVM - Attaching a disk to a VM that had an incremental backup previously made will yield an error. The `--reset` option for the backup operation is required to recreate a new incremental chain - Incremental backups on VMs with disk or system snapshots is not supported @@ -121,23 +126,23 @@ To configure using the Sunstone GUI, select the **Backup** tab: Reference: Backup Configuration Attributes -------------------------------------------------------------------------------- -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| Attribute | Description | -+===========================+==============================================================================================================+ -| ``BACKUP_VOLATILE`` | Perform backup of the volatile disks of the VM (default: ``NO``) | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``FS_FREEZE`` | Operation to freeze guest FS: ``NONE`` do nothing, ``AGENT`` use guest agent, ``SUSPEND`` suspend the domain | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``KEEP_LAST`` | Only keep the last N backups (full backups or increments) for the VM | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``MODE`` | Backup type ``FULL`` or ``INCREMENT`` | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``INCREMENT_MODE`` | Incremental backup type ``CBT`` or ``SNAPSHOT`` | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``INCREMENTAL_BACKUP_ID`` | For ``INCREMENT`` points to the backup image where increment chain is stored | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``LAST_INCREMENT_ID`` | For ``INCREMENT`` the ID of the last incremental backup taken | -+---------------------------+--------------------------------------------------------------------------------------------------------------+ ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| Attribute | 
Description | ++===========================+========================================================================================================================+ +| ``BACKUP_VOLATILE`` | Perform backup of the volatile disks of the VM (default: ``NO``) | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| ``FS_FREEZE`` | Operation to freeze guest FS: ``NONE`` do nothing (default), ``AGENT`` use guest agent, ``SUSPEND`` suspend the domain | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| ``KEEP_LAST`` | Only keep the last N backups (full backups or increments) for the VM (default: none) | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| ``MODE`` | Backup type ``FULL`` (default) or ``INCREMENT`` | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| ``INCREMENT_MODE`` | Incremental backup type ``CBT`` (default) or ``SNAPSHOT`` | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| ``INCREMENTAL_BACKUP_ID`` | For ``INCREMENT`` points to the backup image where increment chain is stored (read-only) | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ +| ``LAST_INCREMENT_ID`` | For ``INCREMENT`` the ID of the last incremental backup taken (read-only) | ++---------------------------+------------------------------------------------------------------------------------------------------------------------+ Taking VM backups ================================================================================ diff --git a/source/management_and_operations/backups/overview.rst b/source/management_and_operations/backups/overview.rst index d98644020..d96250e74 100644 --- a/source/management_and_operations/backups/overview.rst +++ b/source/management_and_operations/backups/overview.rst @@ -26,28 +26,30 @@ Hypervisor & Storage Compatibility Performing a VM backup may require some support from the hypervisor or the disk image formats. The following table summarizes the backup modes supported for each hypervisor and storage system. 
-+------------+------------------------+------+-----------+------+-----------+ -| Hypervisor | Storage | Full | Incremental | -+ + +------+-----------+------+-----------+ -| | | Live | Power off | Live | Power off | -+============+========================+======+===========+======+===========+ -| KVM | File\ :sup:`*` (qcow2) | Yes | Yes | Yes | Yes | -+ +------------------------+------+-----------+------+-----------+ -| | File\ :sup:`*` (raw) | Yes | Yes | No | No | -+ +------------------------+------+-----------+------+-----------+ -| | Ceph | Yes | Yes | No | No | -+ +------------------------+------+-----------+------+-----------+ -| | LVM | Not supported | -+------------+------------------------+------+-----------+------+-----------+ -| LXC | File (any format) | No | Yes | No | No | -| +------------------------+------+-----------+------+-----------+ -| | Ceph | No | Yes | No | No | -| +------------------------+------+-----------+------+-----------+ -| | LVM | Not supported | -+------------+------------------------+------+-----------+------+-----------+ -| vCenter | vCenter\ :sup:`**` | Not supported | -+------------+------------------------+------+-----------+------+-----------+ ++------------+------------------------+---------+-----------+---------+-----------+ +| Hypervisor | Storage | Full | Incremental | ++ + +---------+-----------+---------+-----------+ +| | | Live | Power off | Live | Power off | ++============+========================+=========+===========+=========+===========+ +| KVM | File\ :sup:`*` (qcow2) | Yes | Yes | Yes | Yes | ++ +------------------------+---------+-----------+---------+-----------+ +| | File\ :sup:`*` (raw) | Yes | Yes | No | No | ++ +------------------------+---------+-----------+---------+-----------+ +| | Ceph | Yes`†` | Yes`†` | Yes`†` | Yes`†` | ++ +------------------------+---------+-----------+---------+-----------+ +| | LVM | Not supported | ++------------+------------------------+---------+-----------+---------+-----------+ +| LXC | File (any format) | No | Yes | No | No | +| +------------------------+---------+-----------+---------+-----------+ +| | Ceph | No | Yes | No | No | +| +------------------------+---------+-----------+---------+-----------+ +| | LVM | Not supported | ++------------+------------------------+---------+-----------+---------+-----------+ +| vCenter | vCenter\ :sup:`**` | Not supported | ++------------+------------------------+---------+-----------+---------+-----------+ \ :sup:`*` Any datastore based on files with the given format, i.e. NFS/SAN or SSH. \ :sup:`**` The legacy vCenter driver is included in the distribution, but no longer receives updates or bug fixes. + +\ :sup:`†` Ceph full/incremental backups are currently stored in a different way, see :ref:`backup types ` for more details. 
diff --git a/source/management_and_operations/backups/restic.rst b/source/management_and_operations/backups/restic.rst index e281f0ada..5b67e55c6 100644 --- a/source/management_and_operations/backups/restic.rst +++ b/source/management_and_operations/backups/restic.rst @@ -113,35 +113,37 @@ To recover from this error, check there are no ongoing operations and execute `` Reference: Restic Datastore Attributes ================================================================================ -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| Attribute | Description | -+========================+==============================================================================================================+ -| ``RESTIC_SFTP_USER`` | User to connect to the backup server (default ``oneadmin``) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_SFTP_SERVER`` | IP address of the backup server | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_PASSWORD`` | Password to access the restic repository | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_IONICE`` | Run backups under a given ionice priority (best-effort, class 2). Value: 0 (high) - 7 (low) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_NICE`` | Run backups under a given nice. Value: -19 (high) to 19 (low) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_MAX_RIOPS`` | Run backups in a systemd slice, limiting the max number of read iops | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_MAX_WIOPS`` | Run backups in a systemd slice, limiting the max number of write iops | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_CPU_QUOTA`` | Run backups in a systemd slice with a given cpu quota (percentage). Use > 100 for using several CPUs | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_BWLIMIT`` | Limit restic upload/download bandwidth | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_COMPRESSION`` | Compression (three modes:off, auto, max), default is ``auto`` (average compression without to much CPU usage)| -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_CONNECTIONS`` | Number of concurrent connections (default 5). For high-latency backends this number can be increased. | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_MAXPROC`` | Sets ``GOMAXPROCS`` for restic to limit the OS threads that execute user-level Go code simultaneously. 
| -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RESTIC_SPARSIFY`` | Runs ``virt-sparsify`` on flatten backups to reduce backup size. It requires ``libguestfs`` package. | -+------------------------+--------------------------------------------------------------------------------------------------------------+ ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| Attribute | Description | ++==============================+==============================================================================================================+ +| ``RESTIC_SFTP_USER`` | User to connect to the backup server (default ``oneadmin``) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_SFTP_SERVER`` | IP address of the backup server | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_PASSWORD`` | Password to access the restic repository | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_IONICE`` | Run backups under a given ionice priority (best-effort, class 2). Value: 0 (high) - 7 (low) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_NICE`` | Run backups under a given nice. Value: -19 (high) to 19 (low) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_MAX_RIOPS`` | Run backups in a systemd slice, limiting the max number of read iops | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_MAX_WIOPS`` | Run backups in a systemd slice, limiting the max number of write iops | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_CPU_QUOTA`` | Run backups in a systemd slice with a given cpu quota (percentage). Use > 100 for using several CPUs | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_BWLIMIT`` | Limit restic upload/download bandwidth | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_COMPRESSION`` | Compression (three modes:off, auto, max), default is ``auto`` (average compression without to much CPU usage)| ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_CONNECTIONS`` | Number of concurrent connections (default 5). For high-latency backends this number can be increased. | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_MAXPROC`` | Sets ``GOMAXPROCS`` for restic to limit the OS threads that execute user-level Go code simultaneously. 
| ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RESTIC_SPARSIFY`` | Runs ``virt-sparsify`` on flatten backups to reduce backup size. It requires ``libguestfs`` package. | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``DATASTORE_CAPACITY_CHECK`` | Enable/Disable automatic capacity check on the datastore before a backup operation | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ .. |restic_create| image:: /images/backup_restic_create.png :width: 700 diff --git a/source/management_and_operations/backups/rsync.rst b/source/management_and_operations/backups/rsync.rst index bdbc8886f..6e4b1d5e7 100644 --- a/source/management_and_operations/backups/rsync.rst +++ b/source/management_and_operations/backups/rsync.rst @@ -57,29 +57,31 @@ Other Configurations Reference: rsync Datastore Attributes ================================================================================ -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| Attribute | Description | -+========================+==============================================================================================================+ -| ``RSYNC_USER`` | User to connect to the rsync server (Required) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_HOST`` | IP address of the backup server (Required) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_ARGS`` | Command line arguments for `rsync` command (Default: `-az`) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_TMP_DIR`` | Temporary Directory used for rebasing incremental images (Default: `/var/tmp`) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_IONICE`` | Run backups under a given ionice priority (best-effort, class 2). Value: 0 (high) - 7 (low) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_NICE`` | Run backups under a given nice. Value: -19 (high) to 19 (low) | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_MAX_RIOPS`` | Run backups in a systemd slice, limiting the max number of read iops | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_MAX_WIOPS`` | Run backups in a systemd slice, limiting the max number of write iops | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_CPU_QUOTA`` | Run backups in a systemd slice with a given cpu quota (percentage). 
Use > 100 for using several CPUs | -+------------------------+--------------------------------------------------------------------------------------------------------------+ -| ``RSYNC_SPARSIFY`` | Runs ``virt-sparsify`` on flatten backups to reduce backup size. It requires ``libguestfs`` package. | -+------------------------+--------------------------------------------------------------------------------------------------------------+ ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| Attribute | Description | ++==============================+==============================================================================================================+ +| ``RSYNC_USER`` | User to connect to the rsync server (Required) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_HOST`` | IP address of the backup server (Required) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_ARGS`` | Command line arguments for `rsync` command (Default: `-az`) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_TMP_DIR`` | Temporary Directory used for rebasing incremental images (Default: `/var/tmp`) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_IONICE`` | Run backups under a given ionice priority (best-effort, class 2). Value: 0 (high) - 7 (low) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_NICE`` | Run backups under a given nice. Value: -19 (high) to 19 (low) | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_MAX_RIOPS`` | Run backups in a systemd slice, limiting the max number of read iops | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_MAX_WIOPS`` | Run backups in a systemd slice, limiting the max number of write iops | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_CPU_QUOTA`` | Run backups in a systemd slice with a given cpu quota (percentage). Use > 100 for using several CPUs | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``RSYNC_SPARSIFY`` | Runs ``virt-sparsify`` on flatten backups to reduce backup size. It requires ``libguestfs`` package. | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ +| ``DATASTORE_CAPACITY_CHECK`` | Enable/Disable automatic capacity check on the datastore before a backup operation | ++------------------------------+--------------------------------------------------------------------------------------------------------------+ .. 
|rsync_create| image:: /images/backup_rsync_create.png :width: 700 diff --git a/source/management_and_operations/capacity_planning/quotas.rst b/source/management_and_operations/capacity_planning/quotas.rst index dbf35e73e..bc0d863a5 100644 --- a/source/management_and_operations/capacity_planning/quotas.rst +++ b/source/management_and_operations/capacity_planning/quotas.rst @@ -212,6 +212,8 @@ Use the ``oneuser/onegroup defaultquota`` command. $ oneuser defaultquota +By default, the defaultquota is set to unlimited. Once the editor opens after issuing ``oneuser defaultquota`` you'll see comments regarding how to set the quotas and no quota template. Setting a quota with a template using unlimited values will translate to a blank quota. If you issue ``oneuser defaultquota`` again, you'll see the same comments with blank quota. If you set a non unlimited quota, you'll see the value of the quota that is established as default. + Checking User/Group Quotas ================================================================================ diff --git a/source/management_and_operations/end-user_web_interfaces/fireedge_sunstone.rst b/source/management_and_operations/end-user_web_interfaces/fireedge_sunstone.rst index 2ccfe1b91..f279d55e7 100644 --- a/source/management_and_operations/end-user_web_interfaces/fireedge_sunstone.rst +++ b/source/management_and_operations/end-user_web_interfaces/fireedge_sunstone.rst @@ -316,8 +316,6 @@ The attributes described here indicate which buttons are visible to operate over +-------------------------+-----------------------------------------------------------------------------+ | ``unshare`` | Users will be able to unshare VM Templates. | +-------------------------+-----------------------------------------------------------------------------+ -| ``vmrc`` | Users will be able to establish a VMRC connection. | -+-------------------------+-----------------------------------------------------------------------------+ | ``vnc`` | Users will be able to establish a VNC connection. | +-------------------------+-----------------------------------------------------------------------------+ diff --git a/source/management_and_operations/references/creating_images.rst b/source/management_and_operations/guest_os/creating_images.rst similarity index 100% rename from source/management_and_operations/references/creating_images.rst rename to source/management_and_operations/guest_os/creating_images.rst diff --git a/source/management_and_operations/guest_os/index.rst b/source/management_and_operations/guest_os/index.rst new file mode 100644 index 000000000..18b1c295d --- /dev/null +++ b/source/management_and_operations/guest_os/index.rst @@ -0,0 +1,10 @@ +==================================== +Guest Operating Systems +==================================== + +.. 
toctree:: + :maxdepth: 2 + + Contextualization + Creating Disk Images + Windows Best Practices diff --git a/source/management_and_operations/references/kvm_contextualization.rst b/source/management_and_operations/guest_os/kvm_contextualization.rst similarity index 100% rename from source/management_and_operations/references/kvm_contextualization.rst rename to source/management_and_operations/guest_os/kvm_contextualization.rst diff --git a/source/management_and_operations/guest_os/windows_best_practice.rst b/source/management_and_operations/guest_os/windows_best_practice.rst new file mode 100644 index 000000000..3a5248a17 --- /dev/null +++ b/source/management_and_operations/guest_os/windows_best_practice.rst @@ -0,0 +1,263 @@ +.. _windows_best_practice: + +================================================================================ +Windows Guest Best Practices +================================================================================ + +Windows as a guest operating system on KVM hypervisors requires some additional configuration in order to achieve performant virtual machines. In this document we'll go over the best practices for deploying your Windows Virtual Machine, and provide some extra actions that can be taken in Windows after deployment to improve performance. + +* Resource Allocations +* Template Configuration +* Post Deployment actions + +The best way to achieve high performance when using this guide is to also use higher performing hardware, the disk will be the most relevant and performance will be greatly impacted on systems with HDDs rather than SSD/NVMe. + +Preparing The Template +====================== + +In order to begin installing Windows we will need to create a template which will facilitate the installation. To prepare for this you should create some images: + +- Download the Windows ISO of your choice from Microsoft. Add this as a CDROM type image in OpenNebula +- :ref:`Create a persistent empty image ` which will be the target disk for Windows to be installed on. Different versions of Windows require different minimum disk space. + Under Advanced Options set BUS to Virtio, and setting the format to RAW will also increase disk performance but QCOW2 is sparse and saves disk space. + +.. image:: /images/windows_bp_create_image.png + :width: 90% + :align: center + +- Download the `VirtIO Drivers ISO from the virtio-win github page `_ and add it to OpenNebula as a CDROM type. + If you require WHQL-signed VirtIO drivers, then you may need to obtain those through a paid RHEL License as noted in that README. +- Download the latest `OpenNebula Contextualization ISO `__ and add it to OpenNebula as a CDROM type. + +Once all of these images have been prepared we can start creating the template. Under **Templates --> VMs** click the **+** button at the top, then select **Create**. In here we need to add all of these images and define the VM configuration, we'll go through each necessary section here: + +General +------- + +Fill out the name and resources you wish you allocate to this virtual machine. Ensure there is enough memory for the version of Windows you are installing. + +You may also set :ref:`Memory Resize Mode ` to Ballooning here to allow you to change the memory usage. If you do enable this you'll also want to Enable Hot Resize and set the Max memory value. 
Inside of the Windows VM, the hardware will read as having `MAX_MEMORY` amount of RAM however when you resize the memory, the QEMU Guest Agent will expand a "balloon" to effectively remove that memory from the Windows VM and free it up on the host. Later, the memory can be increased up to but not exceeding `MAX_MEMORY`. + +Once these are filled out, proceed to the Advanced Options to configure the rest of the template. + + +Storage +------- + +Here, you'll want to attach some disk images. The first disk should be the target persistent image we created earlier. Once selected, click Next to the Advanced options section. Here, define the BUS as Virtio(not required if you did this earlier), leave cache at None or set it to Writethrough, and set IO policy to Native. This should provide the most performance for the disk. + +Now add a new disk and select the Windows Installation ISO, there are no advanced options required here. Also add two more new disks, one of them select the VirtIO ISO, and the other select the Context ISO you downloaded earlier. These also do not require any advanced options. + +Network +------- + +On the main Networks tab when creating this template before attaching any NICs the Network Default Hardware Model should be set to `virtio` for best performance. You can leave this blank if you wish to use internet during setup but we recommend to use VirtIO devices whenever possible. + +You can define a network interface here if required. It's possible to setup Windows without network access however to update the system you'll need to eventually connect to the internet. Be aware that during installation if you do not define emulated hardware model as `virtio` then Windows will attempt to use DHCP on this interface to connect to the internet. You will need to either manually configure the networking inside the VM or install Context packages before the OpenNebula defined network configuration will be applied. + +The "RDP Connection" option is useful if you want to access RDP using the browser. You should enable this on the primary network device, and will need to enable it inside the virtual machine later before being able to use it. + + +OS & CPU +-------- + +There are some major changes necessary here in order to get the most out of your Windows VM. Let's go through each tab in this section: + +Boot +~~~~ + +- CPU Architecture: x86_64 +- Bus for SD Disks: SCSI +- Machine Type: q35 is required for secure boot, and handles PCI passthrough better. +- Boot Order: Move the ISO to the top, then the target disk as the second, and check the boxes next to both of them. Leave the others unchecked. +- Firmware: Set this to UEFI. If necessary, use the `OVMF_CODE.secboot.fd` to enable Secure Boot. + +.. note:: There are resources online for disabling Secure Boot and TPM however they involve modifying the registry. + +Most distributions' repositories do not have the properly signed Secure Boot BIOS for Windows. If you are having trouble installing with Secure Boot enabled, then you should download the RHEL RPM for edk2-ovmf, which can be found `here on pkgs.org `__. Once you have downloaded that you'll need to extract and update your firmware files manually. Download the RPM to a directory that is easily accessible, and maybe on the frontend so you can just transfer the files to each host. These firmware files should exist and be the same layout on every hypervisor. + +.. warning:: Modifying the UEFI/BIOS files may cause machines to be unable to boot. 
These machines must be re-instantiated in order to get the updated changes. If this is the case, you'll want to configure that VM to use the old UEFI files. Instructions for that are after the installation instructions. + +To extract the files from the RPM do the following: + +.. code:: + + cd /path/to/downloaded/RPM/ + mkdir extracted + cd extracted + rpm2cpio ../edk2-ovmf.el8.noarch.rpm | cpio -idmv + find . + +On each host, you should backup the original firmware files in case you need to restore or use them as well: + +.. code:: + + cd /usr/share + mkdir backup_OVMF + mv edk2 qemu OVMF backup_OVMF/ + +After that you should copy the new files into their places: + +.. code:: + + cd /path/to/download/RPM/extracted/usr/share/ + cp -r edk2 qemu OVMF /usr/share/. + +This should copy all the necessary folders to the same spot as the others. This is required as the NVRAM is copied from this directory. + +If you need to use one of the old firmware, mostly due to instantiated VM's already using the previous one, then you will need to perform a bit more configuration. Since we already put the old firmware files in `/usr/share/backup_OVMF` we just need to add them to the acceptable firmware list, and update any VM Templates necessary. For the VM Templates, just update their template and set the Firmware to "Custom" and then insert the full path to the backed up firmware by adding `backup` to the OVMF directory, for example `/usr/share/backup_OVMF/OVMF_CODE.fd` + +You'll also need to update the configuration file at `/etc/one/vmm_exec/vmm_exec_kvm.conf` to include these new files as well. Example: + +.. code:: + + OVMF_UEFIS = "/usr/share/OVMF/OVMF_CODE.fd /usr/share/OVMF/OVMF_CODE.secboot.fd /usr/share/AAVMF/AAVMF_CODE.fd /usr/share/backup_OVMF/OVMF/OVMF_CODE.fd /usr/share/backup_OVMF/OVMF/OVMF_CODE.secboot.fd" + +After these changes, make sure you restart the `opennebula` service. + +Features +~~~~~~~~ + +- ACPI: yes +- APIC: yes +- PAE: yes +- HYPERV: yes +- QEMU Guest Agent: yes +- Leave the rest blank for default values + +CPU Model +~~~~~~~~~ + +- CPU Model: host-passthrough + + +Input/Output +~~~~~~~~~~~~ + +Under the Inputs section select a Tablet type on USB bus, then click Add. This will make the mouse click where you want it to when using VNC. + +Defining a Virtio model display device at a higher resolution can be useful here as well, this can allow higher resolutions in the desktop. + +If you are using non-networking PCI Passthrough devices, this is the place to add them as well, such as GPU's. See the :ref:`PCI Passthrough Guide `. + + +Tags +~~~~ + +Here we can add some RAW data that can be useful depending on your use case. + + +TPM Device +********** + +If you have a physical TPM device on your host, you can pass through the TPM to the guest OS with this XML, however ensure the device is at `/dev/tpm0` before implementing it. + +.. note:: If you already have defined in your XML, insert the tags inside of that devices tag. + +.. code:: + + + + + + + + + +If you do not have a physical TPM device on your host you can emulate one. There are two options for the model, `tpm-tis` is the default and will work with both TPM 1.2 and 2.0 while `tpm-crb` will only work when the TPM version is 2.0. + +.. note:: If using an emulated TPM device, ensure you have installed swtpm and swtpm-tools packages on all hypervisors. + +.. code:: + + + + + + + +.. code:: + + + + + + + +Extra information on the Libvirt TPM device usage can be found in `their documentation `__. 
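+As a minimal sketch (your exact template may differ), the emulated and passthrough variants described above map to the following libvirt domain XML; the ``<tpm>`` element is placed inside the ``<devices>`` element supplied as RAW data:
+
+.. code::
+
+    <!-- emulated TPM 2.0 (requires swtpm and swtpm-tools on the hypervisors) -->
+    <devices>
+      <tpm model='tpm-crb'>
+        <backend type='emulator' version='2.0'/>
+      </tpm>
+    </devices>
+
+    <!-- passthrough of a physical TPM exposed at /dev/tpm0 -->
+    <devices>
+      <tpm model='tpm-tis'>
+        <backend type='passthrough'>
+          <device path='/dev/tpm0'/>
+        </backend>
+      </tpm>
+    </devices>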
+ +Above 4G Decoding +***************** + +If you have a GPU which has more than 4GB of memory, you may be unable to address all of the memory without changing a BIOS setting to allow this decoding. Include the following XML if you wish to utilize all the memory of the GPU: + +.. code:: + + + + + + +NUMA +~~~~ + +By default, libvirt/QEMU will allocate 1 core to 1 socket, so 8 CPUs will be seen by the system as 8 sockets each with 1 core. This is fine for most operating systems; however, Windows has restrictions on sockets, so we need to define a NUMA topology. + +For best performance, the Pin Policy should be set to `core`; however, any of the policies will allow Windows to see all allocated CPUs. Define Sockets as 1 and Threads as 1, but define Cores and Virtual CPU Select to the same value as the CPU defined in the General tab. + +You may also want to define the Hugepages Size; the most performant option should be 1024M (1G) hugepages. `Here is some Red Hat documentation about enabling huge pages persistently `__. Enabling these should increase memory performance of the VM, and even with the default 2M pages you should see a difference. + +For extra information and how to enable this on the hosts please see :ref:`our documentation about NUMA Topology `. + +Installing the Operating System +=============================== + +.. note:: These instructions are written for installing Windows 10/11, but the instructions for Server editions should be nearly identical. + +Now that we've created the template with all the necessary images and configurations we can begin the deployment. Select the Template we just created and go to Instantiate. In this form you should mainly need to fill out the name. The Capacity and Disks should already be filled in, and your Network should have been configured in the Template. If not, configure a network now if necessary. If you require a specific host or datastore then you may also want to define those here. + +Once the Virtual Machine has been instantiated, it should begin deploying. If it is not, ensure that the scheduler requirements can be met and that the hosts have the proper Pin Policy for their NUMA Configuration. + +Once the Virtual Machine is running, open up the VNC viewer. If you are fast enough, you should see the prompt `Press any key to boot from CD or DVD...` upon which you should click into the VNC viewer and press any key. If you do not see this and instead see a `Shell>` prompt, you should click in and type `exit` then hit Enter. This will cause it to reboot, and then you can press a key to trigger booting to the ISO. + +It may take a few minutes for the ISO to load properly but you should eventually see the Windows Setup window. Specify the Language/Time Formats and the Keyboard format, then click Next to continue. Click "Install Now" and wait for Setup to start. + +When prompted for a product key, select the option `I don't have a product key` so the machine can be activated later. Afterwards, select the edition of Windows you wish to install. After accepting the license agreement, you should see a page asking where to install Windows but there will not be any disks visible. We will need to install the VirtIO disk drivers. + +In order to do this, click `Load Driver` then `Browse...`. In here, scroll down to and open the CD Drive `virtio-win-*`, then expand the `amd64` folder and select the edition of Windows, then click OK. Select the `Red Hat VirtIO SCSI controller` and click Next. The disk should be visible once this is installed.
You should see a `Drive 0 Unallocated Space` with the size of the image we created earlier as the target disk. Select this disk and click Next. + +.. image:: /images/windows_bp_virtio_drivers.png + :width: 65% + :align: center + +Windows will now begin installing. This will take some time depending on the hardware, but once it is completed and has rebooted you should be prompted to begin the setup. Proceed as normal here until it prompts for network access. Select the option `I don't have internet` and then `Continue with limited setup`. + +.. note:: For Windows 11 this may not be an option depending on how old the image is. If you are unable to bypass the network requirement part of Windows 11, press `Shift + F10` to open a Command Prompt in the Virtual Machine. Then type `oobe\BypassNRO` and hit Enter. This will reboot the machine and allow you to bypass the network requirements. + +.. image:: /images/windows_bp_bypassnro.png + :width: 65% + :align: center + +You should be prompted to create a local account at this point, so continue through that setup. We recommend disabling all telemetry, diagnostic options, and ad identification, which may impact performance. The same goes for Cortana; it can be skipped or disabled later on. Windows should continue setting up now. Once completed, you should be at the Windows Desktop. + + +Post-Install Actions +==================== + +Now that we have Windows installed on our Virtual Machine and we are at the desktop, we can finish installing everything. First, open up an Explorer window and navigate to the CD Drive with the `virtio-win-*` label. Scroll down and select the `virtio-win-gt-x64` installer (if your OS is 32-bit, select the x86 installer instead). Proceed with this installation, installing all available virtio drivers including the QEMU Guest Agent. The QEMU Guest Agent is required for Memory Ballooning to operate properly. + +Once that is completed, you should navigate back to the list of drives and open up the CD Drive with the `one-context-*` label. Inside it there should be an MSI installer, which you should run. It will install very quickly since our context packages are quite small. + +You should also enable Remote Desktop at this point. Just open the Settings and navigate to System -> Remote Desktop Settings and enable Remote Desktop. If you want to use the browser-based RDP rather than an RDP client then you'll also need to expand this option and uncheck the box "Require devices to use Network Level Authentication to connect". + +Once this is done you should be able to shut down the virtual machine either from the VNC viewer or from OpenNebula's Power Off command. Once it is read as being in POWEROFF state, you can clean up everything. In the storage tab, make sure you disconnect the Windows Installation ISO, the VirtIO Windows ISO, and the Context-Windows ISO, leaving behind only the persistent target disk. + +Finally, boot the virtual machine up again and verify the network configuration. It should match the assigned configuration in OpenNebula since we installed the context packages. If RDP was enabled you should be able to connect to it as well. At this point you should be able to move forward with updating the operating system with all the latest updates, and then start using your system. + +At this point you can make any internal changes to the operating system necessary, including updating it and disabling services or features to increase performance. There are also some preinstalled programs that are not necessary for most use cases and can be uninstalled.
Once the Operating System is set up the way you would like it to be, you can shut down the virtual machine from inside. Once OpenNebula monitors the VM as being powered off, you can :ref:`Save the Virtual Machine Instance ` (do not mark the saved one as persistent) and then you should be able to instantiate this newly saved Template to deploy multiple Windows machines. + +Extra Suggestions +================= + +Internally, the Windows OS can be a bit slower through this interface, partially due to the graphical effects. If you open Settings and navigate to System -> About -> Advanced system settings (on the right side), a window should pop up. On this window, inside the Performance section, click the Settings... button. Here, select Adjust for best performance, or modify the check-boxes to your liking. The fewer effects, the more responsive the interface will be. diff --git a/source/management_and_operations/host_cluster_management/hosts.rst b/source/management_and_operations/host_cluster_management/hosts.rst index 354376011..72156a3ec 100644 --- a/source/management_and_operations/host_cluster_management/hosts.rst +++ b/source/management_and_operations/host_cluster_management/hosts.rst @@ -356,6 +356,8 @@ After a Virtual Machine is imported, its life-cycle (including creation of snaps .. warning:: Wild VMs’ support and limitations may differ depending on the virtualization driver used (e.g KVM or LXC). In order to find more specific information for the virtualization driver you’re using, please check the corresponding driver guide. +.. note:: This command is deprecated and will be removed in a future release. Imported VMs will be removed from OpenNebula management and will appear again as wild VMs on the host. + Using Sunstone to Manage Hosts ================================================================================ diff --git a/source/management_and_operations/index.rst b/source/management_and_operations/index.rst index 674c04467..e3de011a2 100644 --- a/source/management_and_operations/index.rst +++ b/source/management_and_operations/index.rst @@ -21,5 +21,6 @@ The Management and Operations Guide provides complete information about how to o Multi-VM Service Users and Groups Capacity Planning - Monitoring and Alerting (EE) + Monitoring and Alerting + Guest Operating Systems References diff --git a/source/management_and_operations/multivm_service_management/appflow_elasticity.rst b/source/management_and_operations/multivm_service_management/appflow_elasticity.rst index 20610a389..592f2ea8d 100644 --- a/source/management_and_operations/multivm_service_management/appflow_elasticity.rst +++ b/source/management_and_operations/multivm_service_management/appflow_elasticity.rst @@ -4,7 +4,7 @@ OneFlow Services Auto-scaling ============================= -A Service Role's cardinality can be adjusted manually, based on metrics or based on a schedule. +A Service Role’s cardinality can be adjusted either manually or automatically, in two ways: based on metrics or based on a schedule.
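+For illustration only, a role definition combining both automatic mechanisms could look like the following sketch (role name and values are hypothetical; see the sections below for the exact syntax):
+
+.. code::
+
+    "roles": [
+      {
+        "name": "frontend",
+        "cardinality": 2,
+        "min_vms": 1,
+        "max_vms": 5,
+        "elasticity_policies": [
+          { "type": "CHANGE", "adjust": 1, "expression": "CPU > 80", "period_number": 2, "period": 60 }
+        ],
+        "scheduled_policies": [
+          { "type": "CARDINALITY", "adjust": 4, "recurrence": "0 9 * * 1-5" }
+        ]
+      }
+    ]
+
+Manual adjustments can be made at any time with ``oneflow scale <service_id> <role_name> <cardinality>``.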
Overview ================================================================================ diff --git a/source/management_and_operations/network_management/index.rst b/source/management_and_operations/network_management/index.rst index 3f5657b26..f495579d3 100644 --- a/source/management_and_operations/network_management/index.rst +++ b/source/management_and_operations/network_management/index.rst @@ -13,3 +13,4 @@ Virtual Network Management Security Groups Self Provision Virtual Routers + Transparent Proxies diff --git a/source/management_and_operations/network_management/overview.rst b/source/management_and_operations/network_management/overview.rst index e4ad33c59..4316c4bbf 100644 --- a/source/management_and_operations/network_management/overview.rst +++ b/source/management_and_operations/network_management/overview.rst @@ -10,7 +10,7 @@ How Should I Read This Chapter Before reading this chapter, you should have already installed and configured your cloud. The Chapter is structured as follows: - The :ref:`Virtual Networks ` and :ref:`Virtual Networks Templates ` explain how to create networks. - - Regular users can self-provision virtual networks for their use. ref:`The details are explained here `. + - The :ref:`Self Provision ` section details how regular users can self-provision virtual networks for their use. - You will also find information on :ref:`Security Groups `, to easily define firewall rules. - Additionally you will learn on how to manage :ref:`Virtual Routers ` which are an OpenNebula resource that provide routing across Virtual Networks. diff --git a/source/management_and_operations/network_management/tproxy.rst b/source/management_and_operations/network_management/tproxy.rst new file mode 100644 index 000000000..7c29402d1 --- /dev/null +++ b/source/management_and_operations/network_management/tproxy.rst @@ -0,0 +1,234 @@ +.. _tproxy: + +================================================================================ +Transparent Proxies +================================================================================ + +Transparent Proxies make it possible to connect to management services, such as OneGate, by implicitly using the existing data center backbone networking. The OneGate service usually runs on the leader Front-end machine, which makes it difficult for Virtual Machines running in isolated virtual networks to contact it. This situation forces OpenNebula users to design virtual networking in advance, to ensure that VMs can securely reach OneGate. Transparent Proxies have been designed to remove that requirement. + +About the Design +================================================================================ + +|tproxy_diagram| + +Virtual networking in OpenNebula is bridge-based. Each Hypervisor that runs Virtual Machines in a specific Virtual Network pre-creates such a bridge before deploying the VMs. Transparent Proxies extend that design by introducing a pair of VETH devices, where one of two "ends" is inserted into the bridge and the other is boxed inside the dedicated network namespace. This makes it possible to deploy proxy processes that can be reached by Virtual Machine guests via TCP/IP securely, i.e. without compromising the internal networking of Hypervisor hosts. Proxy processes themselves form a "mesh" of daemons interconnected with UNIX sockets, which allows for complete isolation of the two involved TCP/IP stacks; we call this environment the "String-Phone Proxy." 
The final part of the solution requires that Virtual Machine guests contact services over proxy via the ``169.254.16.9`` link-local address on specific ports, instead of their real endpoints. + +Hypervisor Configuration +================================================================================ + +Transparent Proxies read their config from the ``~oneadmin/remotes/etc/vnm/OpenNebulaNetwork.conf`` file on the Front-end machines. The file uses the following syntax: + +.. code:: + + :tproxy_debug_level: 2 # 0 = ERROR, 1 = WARNING, 2 = INFO, 3 = DEBUG + :tproxy: + # OneGate service. + - :service_port: 5030 + :remote_addr: 10.11.12.13 # OpenNebula Front-end VIP + :remote_port: 5030 + # Custom service. + - :service_port: 1234 + :remote_addr: 10.11.12.34 + :remote_port: 1234 + :networks: [vnet_name_or_id] + +.. note:: + + The YAML snippet above defines two distinct proxies, where the first is the usual OneGate proxy and the second is a completely custom service. + +.. important:: + + If the ``:networks:`` YAML key is missing or empty, the particular proxy will be applied to *all* available Virtual Networks. Defining multiple entries with the identical ``:service_port:`` values will have no effect as the subsequent duplicates will be ignored by networking drivers. + +**To apply the configuration, you need to perform two steps:** + +1. On the leader Front-end machine: as the ``oneadmin`` system user, sync the ``OpenNebulaNetwork.conf`` file with the Hypervisor hosts, by running ``onehost sync -f``. +2. Power-cycle any running guests (for example by running ``onevm poweroff`` followed by ``onevm resume``); otherwise the desired configuration changes may show no effect. + +Guest Configuration +================================================================================ + +The most common use case of Transparent Proxies is for communication with OneGate. Below is an example Virtual Machine template: + +.. code:: + + NAME = "example0" + CONTEXT = [ + NETWORK = "YES", + SSH_PUBLIC_KEY = "$USER[SSH_PUBLIC_KEY]", + TOKEN = "YES" ] + CPU = "1" + DISK = [ + IMAGE = "img0" ] + GRAPHICS = [ + LISTEN = "0.0.0.0", + TYPE = "VNC" ] + MEMORY = "256" + NIC = [ + NETWORK = "vnet0", + NETWORK_UNAME = "oneadmin", + SECURITY_GROUPS = "100" ] + NIC_DEFAULT = [ + MODEL = "virtio" ] + OS = [ + ARCH = "x86_64" ] + +In the simplest (but still instructive) case, a Virtual Machine needs the following settings to connect to OneGate using Transparent Proxies: + +.. code:: + + $ grep ONEGATE_ENDPOINT /run/one-context/one_env + export ONEGATE_ENDPOINT="http://169.254.16.9:5030" + + $ ip route show to 169.254.16.9 + 169.254.16.9 dev eth0 scope link + +.. code:: + + $ onegate vm show -j | jq -r '.VM.NAME' + example0-0 + +Debugging +================================================================================ + +You can find driver logs for each guest on the Front-end machines, in ``/var/log/one/*.log``. + +Proxy logs are found on Hypervisor hosts, in ``/var/log/``. For example: + +.. 
code:: + + $ ls -1 /var/log/one_tproxy*.log + /var/log/one_tproxy.log + /var/log/one_tproxy_br0.log + +The internal implementation of Transparent Proxies involves several networking primitives combined together: + +* ``nft`` (``nftables``) to store the service mapping and manage ARP resolutions +* ``ip netns`` / ``nsenter`` family of commands to manage and use network namespaces +* ``ip link`` / ``ip address`` / ``ip route`` commands +* ``/var/tmp/one/vnm/tproxy`` the actual implementation of the "String-Phone" daemon mesh + +Below are several example command invocations, to gain familiarity with the environment. + +**Listing service mappings in nftables:** + +.. code:: + + $ nft list ruleset + ... + table ip one_tproxy { + map ep_br0 { + type inet_service : ipv4_addr . inet_service + elements = { 1234 : 10.11.12.34 . 1234, 5030 : 10.11.12.13 . 5030 } + } + } + +.. note:: + + The ``nftables`` config is not persisted across Hypervisor host reboots, as it is the default behavior in OpenNebula in general. + +**Listing all custom network namespaces:** + +.. code:: + + $ ip netns list + one_tproxy_br0 (id: 0) + +.. note:: + + Each active Virtual Network requires one of those namespaces to run the proxy inside. + +**Checking if the "internal" end of the VETH device pair has been put inside the dedicated namespace:** + +.. code:: + + $ ip netns exec one_tproxy_br0 ip address + 1: lo: mtu 65536 qdisc noop state DOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + 7: br0a@if8: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 12:00:83:53:f4:3d brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 169.254.16.9/32 scope global br0a + valid_lft forever preferred_lft forever + inet6 fe80::1000:83ff:fe53:f43d/64 scope link + valid_lft forever preferred_lft forever + +.. note:: + + In case multiple Hypervisor hosts participate in the Virtual Network's traffic, the ``169.254.16.9`` address stays the same regardless, the closest Hypervisor host is supposed to answer guest requests. + +**Checking if the default route for sending packets back into the bridge has been configured:** + +.. code:: + + $ ip netns exec one_tproxy_br0 ip route + default dev br0a scope link + +**Listing PIDs of running proxy processes:** + +.. code:: + + $ /var/tmp/one/vnm/tproxy status + one_tproxy: 16803 + one_tproxy_br0: 16809 + +.. note:: + + There is only a single ``one_tproxy`` process running in the default network namespace, it connects to real remote services. + +.. note:: + + There are multiple ``one_tproxy_*`` processes, they are boxed inside corresponding dedicated network namespaces and connect to the ``one_tproxy`` process using UNIX sockets. + +.. note:: + + There is no PID file management implemented. For simplicity, all proxy processes may be found by looking at the ``/proc/PID/cmdline`` process attributes. + +**Restarting/reloading config of proxy daemons:** + +.. code:: + + $ /var/tmp/one/vnm/tproxy restart + $ /var/tmp/one/vnm/tproxy reload + +.. important:: + + While you can manually run the ``start``, ``stop``, ``restart`` and ``reload`` commands as part of a debugging process, under normal circumstances the proxy daemons are completely managed by networking drivers. The command-line interface here is very minimal and does not require any extra parameters, as all the relevant config is stored in ``nftables``. 
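+As an additional end-to-end check, you can verify from inside a guest that a proxied port answers on the link-local address (the port shown is the OneGate example above; adjust it to your ``:service_port:`` configuration):
+
+.. code::
+
+    # inside the Virtual Machine guest
+    $ nc -vz 169.254.16.9 5030
+    $ onegate vm show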
+ +Security Groups +================================================================================ + +Transparent Proxies can be used together with OpenNebula Security Groups. Below is an example of a security group template: + +.. code:: + + NAME = "example0" + + RULE = [ + PROTOCOL = "ICMP", + RULE_TYPE = "inbound" ] + RULE = [ + PROTOCOL = "ICMP", + RULE_TYPE = "outbound" ] + + RULE = [ + PROTOCOL = "TCP", + RANGE = "22", + RULE_TYPE = "inbound" ] + RULE = [ + PROTOCOL = "TCP", + RANGE = "80,443", + RULE_TYPE = "outbound" ] + + # Required for Transparent Proxies + RULE = [ + PROTOCOL = "TCP", + RANGE = "1234,5030", + RULE_TYPE = "outbound" ] + + # DNS + RULE = [ + PROTOCOL = "UDP", + RANGE = "53", + RULE_TYPE = "outbound" ] + +.. |tproxy_diagram| image:: /images/tproxy-diagram.drawio.png diff --git a/source/management_and_operations/references/cli.rst b/source/management_and_operations/references/cli.rst index 27f7a7a68..a0d8a27be 100644 --- a/source/management_and_operations/references/cli.rst +++ b/source/management_and_operations/references/cli.rst @@ -9,34 +9,34 @@ OpenNebula provides a set commands to interact with the system: CLI ================================================================================ -* `oneacct `__: gets accounting data from OpenNebula. -* `oneacl `__: manages OpenNebula ACLs. -* `onecfg `__: manages OpenNebula configuration files upgrade. -* `onecluster `__: manages OpenNebula clusters. -* `onedatastore `__: manages OpenNebula datastores. -* `onedb `__: OpenNebula database migration tool. -* `onegroup `__: manages OpenNebula groups. -* `onehook `__: manages OpenNebula hooks. -* `onehost `__: manages OpenNebula hosts. -* `oneimage `__: manages OpenNebula images. -* `onemarket `__: manages internal and external marketplaces. -* `onemarketapp `__: manages appliances from marketplaces. -* `oneprovider `__: manages OpenNebula providers. -* `oneprovision `__: manages OpenNebula provisions. -* `onesecgroup `__: manages OpenNebula security groups. -* `oneshowback `__: OpenNebula Showback tool. -* `onetemplate `__: manages OpenNebula templates. -* `oneuser `__: manages OpenNebula users. -* `onevcenter `__: handles vCenter resource import. -* `onevdc `__: manages OpenNebula Virtual DataCenters. -* `onevm `__: manages OpenNebula virtual machines. -* `onevmgroup `__: manages OpenNebula VMGroups. -* `onevnet `__: manages OpenNebula networks. -* `onevntemplate `__: manages OpenNebula networks templates. -* `onevrouter `__: manages OpenNebula Virtual Routers. -* `onezone `__: manages OpenNebula zones. -* `oneirb `__: opens an irb session. -* `onelog `__: access to OpenNebula services log files. +* `oneacct `__: gets accounting data from OpenNebula. +* `oneacl `__: manages OpenNebula ACLs. +* `onecfg `__: manages OpenNebula configuration files upgrade. +* `onecluster `__: manages OpenNebula clusters. +* `onedatastore `__: manages OpenNebula datastores. +* `onedb `__: OpenNebula database migration tool. +* `onegroup `__: manages OpenNebula groups. +* `onehook `__: manages OpenNebula hooks. +* `onehost `__: manages OpenNebula hosts. +* `oneimage `__: manages OpenNebula images. +* `onemarket `__: manages internal and external marketplaces. +* `onemarketapp `__: manages appliances from marketplaces. +* `oneprovider `__: manages OpenNebula providers. +* `oneprovision `__: manages OpenNebula provisions. +* `onesecgroup `__: manages OpenNebula security groups. +* `oneshowback `__: OpenNebula Showback tool. +* `onetemplate `__: manages OpenNebula templates. 
+* `oneuser `__: manages OpenNebula users. +* `onevcenter `__: handles vCenter resource import. +* `onevdc `__: manages OpenNebula Virtual DataCenters. +* `onevm `__: manages OpenNebula virtual machines. +* `onevmgroup `__: manages OpenNebula VMGroups. +* `onevnet `__: manages OpenNebula networks. +* `onevntemplate `__: manages OpenNebula networks templates. +* `onevrouter `__: manages OpenNebula Virtual Routers. +* `onezone `__: manages OpenNebula zones. +* `oneirb `__: opens an irb session. +* `onelog `__: access to OpenNebula services log files. The output of these commands can be customized by modifying the configuration files that can be found in ``/etc/one/cli/``. They also can be customized on a per-user basis, in this case the configuration files should be placed in ``$HOME/.one/cli``. @@ -45,13 +45,13 @@ List operation for each command will open a ``less`` session for a better user e OneFlow Commands ================================================================================ -* `oneflow `__: OneFlow Service management. -* `oneflow-template `__: OneFlow Service Template management. +* `oneflow `__: OneFlow Service management. +* `oneflow-template `__: OneFlow Service Template management. OneGate Commands ================================================================================ -* `onegate `__: OneGate Service management. +* `onegate `__: OneGate Service management. .. _cli_shell: diff --git a/source/management_and_operations/references/index.rst b/source/management_and_operations/references/index.rst index bccc6a22f..a1211a8a1 100644 --- a/source/management_and_operations/references/index.rst +++ b/source/management_and_operations/references/index.rst @@ -9,5 +9,3 @@ References Image Template Virtual Network Template Command Line Interface - Guest OS Installation - Contextualization diff --git a/source/management_and_operations/references/install_steps.txt b/source/management_and_operations/references/install_steps.txt index 1f6cd2a5c..eb9c7d13d 100644 --- a/source/management_and_operations/references/install_steps.txt +++ b/source/management_and_operations/references/install_steps.txt @@ -72,7 +72,7 @@ Windows Download the MSI package into ``C:\``: -* https://github.com/OpenNebula/one-apps/releases/download/v6.8.1/one-context-6.8.1.msi +* https://github.com/OpenNebula/one-apps/releases/download/v|context_release|/one-context-|context_release|.msi Or execute this command in powershell: diff --git a/source/management_and_operations/vm_management/vm_instances.rst b/source/management_and_operations/vm_management/vm_instances.rst index 76250d468..d018f8745 100644 --- a/source/management_and_operations/vm_management/vm_instances.rst +++ b/source/management_and_operations/vm_management/vm_instances.rst @@ -157,10 +157,21 @@ and details about it can be obtained with ``show``: Searching for VM Instances -------------------------------------------------------------------------------- -You can search for VM instances by using the ``--search`` option of the ``onevm list`` command. This is specially useful on large environments with many VMs. The filter must be in a ``VM.KEY1=VALUE1&VM.KEY2=VALUE2`` format and will return all the VMs which fit the filter. The ``&`` works as logical AND. You can use ``*=VALUE`` to search the full VM body. +You can search for VM instances by using the ``--search`` option of the ``onevm list`` command. This is specially useful on large environments with many VMs. 
The filter must be in a ``VM.KEY1=VALUE1&VM.KEY2=VALUE2`` format and will return all the VMs which fit the filter. The ``&`` works as logical AND. You can use ``*=VALUE`` to search the full VM body or ``VM.TEMPLATE=VALUE`` to search whole template. Searching is performed using JSON on the whole body of the VM. You can use the MySQL JSON path without the leading ``$.``, information about the path structure can be found in the [MySQL Documentation](https://dev.mysql.com/doc/refman/5.7/en/json.html#json-path-syntax) or [MariaDB Documentation](https://mariadb.com/kb/en/jsonpath-expressions/). Currently, the value is wrapped in ``%`` for the query, so it will match if it contains the value provided. +The ``VALUE`` part of a search query can utilize special characters to create flexible matching patterns: + +* ``%``: Matches any string, allowing for wildcard searches. For example, ``a%a%a`` matches names containing three "a"s in any position, with any number of characters between them. +* ``_``: Matches any single character, enabling precise pattern matching. For instance, ``a_a_a`` matches names with three "a"s, each separated by exactly one character. +* ``&``: Cannot be used in the ``VALUE`` part of the search query, as it is always interpreted as logical AND operator and does not support escaping. + +To search for strings that contain ``%`` or ``_`` literally, escape these characters with a backslash ``\``. For example: + +* ``a\%a`` will search for "a%a" as an exact sequence. +* ``a\_a`` will match "a_a" without interpreting ``_`` as a single-character wildcard. + For example, for searching a VM with a specific MAC address: .. prompt:: text $ auto @@ -854,10 +865,10 @@ To explain that, we are gonna use an example: leases: terminate: edit: false - execute_after_weeks: 3 + execute_after_weeks: 3 poweroff: edit: true - execute_after_minutes: 5 + execute_after_minutes: 5 The previous example will create two schedule actions: @@ -1017,7 +1028,7 @@ There are some ``onevm`` commands operations meant for the cloud administrators: **Deployment:** - ``deploy``: Starts an existing VM in a specific Host. -- ``migrate --live``: The Virtual Machine is transferred between Hosts with no noticeable downtime. +- ``migrate --live``: The Virtual Machine is transferred between Hosts with no noticeable downtime. The VM storage cannot be migrated to other system datastores. - ``migrate``: The VM gets stopped and resumed in the target host. In an infrastructure with :ref:`multiple system datastores `, the VM storage can be also migrated (the datastore id can be specified). Note: By default, the above operations do not check the target host capacity. You can use the ``--enforce`` option to be sure that the host capacity is not overcommitted. @@ -1172,8 +1183,12 @@ After that you can access the VM and configure the SSH service: # Add user: username/password root@:~$ adduser +|fireedge_sunstone_ssh_list| |fireedge_sunstone_ssh_console| + .. note:: Guacamole SSH uses RSA encryption. Make sure the VM SSH accepts RSA, otherwise you need to explicitly enable it in the VM SSH configuration (HostkeyAlgorithms and PubkeyAcceptedAlgorithms set as '+ssh-rsa) + + .. |sunstone_vm_charter| image:: /images/sunstone_vm_charter.png .. |sunstone_charter_info| image:: /images/sunstone_charter_info.png .. |sunstone_rdp_connection| image:: /images/sunstone_rdp_connection.png @@ -1182,6 +1197,10 @@ After that you can access the VM and configure the SSH service: .. |sunstone_guac_rdp| image:: /images/sunstone_guac_rdp.png .. 
|sunstone_guac_rdp_interface| image:: /images/sunstone_guac_rdp_interface.png .. |sunstone_guac_nic_1| image:: /images/sunstone_guac_nic_1.png -.. |sunstone_guac_nic_2| image:: /images/sunstone_guac_nic_2.png +.. |sunstone_guac_nic_2| image:: /images/sunstone_guac_nic_2.png .. |sunstone_sg_main_view| image:: /images/sunstone_sg_main_view.png .. |sunstone_sg_attach| image:: /images/sunstone_sg_attach.png +.. |fireedge_sunstone_ssh_list| image:: /images/fireedge_sunstone_ssh_list.png + :width: 45% +.. |fireedge_sunstone_ssh_console| image:: /images/fireedge_sunstone_ssh_console.png + :width: 45% diff --git a/source/open_cluster_deployment/kvm_node/kvm_driver.rst b/source/open_cluster_deployment/kvm_node/kvm_driver.rst index f0cb16cce..104a67c74 100644 --- a/source/open_cluster_deployment/kvm_node/kvm_driver.rst +++ b/source/open_cluster_deployment/kvm_node/kvm_driver.rst @@ -16,7 +16,7 @@ Considerations & Limitations Try to use :ref:`virtio ` whenever possible, both for networks and disks. Using emulated hardware, both for networks and disks, will have an impact on performance and will not expose all the available functionality. For instance, if you don't use ``virtio`` for the disk drivers, you will not be able to exceed a small number of devices connected to the controller, meaning that you have a limit when attaching disks and it will not work while the VM is running (live disk-attach). -When **updating the VM configuration live** using ``one.vm.updateconf`` although the all of the VM configuration will get updated on the VM instance template, only the CONTEXT and BACKUP_CONFIG will take effect immediately. The rest of the configuration will not take effect until the next VM reboot because it changes the VM virtual hardware. +When **updating the VM configuration live** using ``one.vm.updateconf`` although all of the VM configuration will get updated on the VM instance template, only the CONTEXT and BACKUP_CONFIG will take effect immediately. The rest of the configuration will not take effect until the next VM reboot because it changes the VM virtual hardware. The full list of configuration attributes are: @@ -50,22 +50,30 @@ The KVM driver is enabled by default in OpenNebula ``/etc/one/oned.conf`` on you Driver Defaults -------------------------------------------------------------------------------- -There are some attributes required for KVM to boot a VM. You can set a suitable default for them so all the VMs get the required values. These attributes are set in ``/etc/one/vmm_exec/vmm_exec_kvm.conf``. The following can be set for KVM: +There are some attributes required for KVM to boot a VM. You can set a suitable default for them so all the VMs get the required values. These attributes are set in ``/etc/one/vmm_exec/vmm_exec_kvm.conf``. Default values from the configuration file can be overriden in the Cluster, Host or VM Template. The following attributes can be set for KVM: * ``EMULATOR``: path to the kvm executable. -* ``OS``: attributes ``KERNEL``, ``INITRD``, ``BOOT``, ``ROOT``, ``KERNEL_CMD``, ``MACHINE``, ``ARCH`` and ``SD_DISK_BUS``. +* ``OS``: attributes ``KERNEL``, ``INITRD``, ``ROOT``, ``KERNEL_CMD``, ``MACHINE``, ``ARCH``, ``SD_DISK_BUS``, ``FIRMWARE``, ``FIMRWARE_SECURE`` and ``BOOTLOADER`` * ``VCPU`` -* ``FEATURES``: attributes ``ACPI``, ``PAE``, ``APIC``, ``HEPRV``, ``GUEST_AGENT``, ``VIRTIO_SCSI_QUEUES``, ``VIRTIO_BLK_QUEUES``, ``IOTHREADS``. -* ``CPU_MODEL``: attribute ``MODEL``. 
-* ``DISK``: attributes ``DRIVER``, ``CACHE``, ``IO``, ``DISCARD``, ``TOTAL_BYTES_SEC``, ``TOTAL_IOPS_SEC``, ``READ_BYTES_SEC``, ``WRITE_BYTES_SEC``, ``READ_IOPS_SEC``, ``WRITE_IOPS_SEC``, ``SIZE_IOPS_SEC``. +* ``VCPU_MAX`` +* ``MEMORY_SLOTS`` +* ``FEATURES``: attributes ``ACPI``, ``PAE``, ``APIC``, ``HEPRV``, ``LOCALTIME``, ``GUEST_AGENT``, ``VIRTIO_SCSI_QUEUES``, ``VIRTIO_BLK_QUEUES``, ``IOTHREADS``. +* ``CPU_MODEL``: attribute ``MODEL``, ``FEATURES``. +* ``DISK``: attributes ``DRIVER``, ``CACHE``, ``IO``, ``DISCARD``, ``TOTAL_BYTES_SEC``, ``TOTAL_BYTES_SEC_MAX``, ``TOTAL_BYTES_SEC_MAX_LENGTH``, ``TOTAL_IOPS_SEC``, ``TOTAL_IOPS_SEC_MAX``, ``TOTAL_IOPS_SEC_MAX_LENGTH``, ``READ_BYTES_SEC``, ``READ_BYTES_SEC_MAX``, ``READ_BYTES_SEC_MAX_LENGTH``, ``WRITE_BYTES_SEC``, ``WRITE_BYTES_SEC_MAX``, ``WRITE_BYTES_SEC_MAX_LENGTH``, ``READ_IOPS_SEC``, ``READ_IOPS_SEC_MAX``, ``READ_IOPS_SEC_MAX_LENGTH``, ``WRITE_IOPS_SEC``, ``WRITE_IOPS_SEC_MAX``, ``WRITE_IOPS_SEC_MAX_LENGTH``, ``SIZE_IOPS_SEC``. * ``NIC``: attribute ``FILTER``, ``MODEL``. * ``GRAPHICS``: attributes ``TYPE``, ``LISTEN``, ``PASSWD``, ``KEYMAP``, ``RANDOM_PASSWD``. The VM instance must have at least empty ``GRAPHICS = []`` section to read these default attributes from the config file and to generate cluster unique ``PORT`` attribute. * ``VIDEO``: attributes: ``TYPE``, ``IOMMU``, ``ATS``, ``VRAM``, ``RESOLUTION``. * ``RAW``: to add libvirt attributes to the domain XML file. * ``HYPERV_OPTIONS``: to enable hyperv extensions. +* ``HYPERV_TIMERS``: timers added when HYPERV is set to yes in FEATURES. * ``SPICE_OPTIONS``: to add default devices for SPICE. + +The following attributes can be overridden at Cluster and Host level, but not within individual VM configuration: + * ``OVMF_UEFIS``: to add allowed file paths for Open Virtual Machine Firmware. * ``Q35_ROOT_PORTS``: to modify the number of PCI devices that can be attached in q35 VMs (defaults to 16) +* ``CGROUPS_VERSION``: Use '2' to use Cgroup V2, all other values or undefined: use Cgroup V1 +* ``EMULATOR_CPUS``: Value used for kvm option .. warning:: These values are only used during VM creation; for other actions like nic or disk attach/detach the default values must be set in ``/var/lib/one/remotes/etc/vmm/kvm/kvmrc``. For more info check :ref:`Files and Parameters ` section. @@ -89,10 +97,6 @@ For example (check the actual state in the configuration file on your Front-end) " -.. note:: - - These values can be overriden in the Cluster, Host and VM Template - **Since OpenNebula 6.0** you should no longer need to modify the ``EMULATOR`` variable to point to the kvm executable; instead, ``EMULATOR`` now points to the symlink ``/usr/bin/qemu-kvm-one`` which should link the correct KVM binary for the given OS on a Host. Live-Migration for Other Cache settings @@ -360,12 +364,15 @@ QEMU Guest Agent allows the communication of some actions with the guest OS. Thi The agent package needed in the Guest OS is available in most distributions. It's called ``qemu-guest-agent`` in most of them. 
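For example, on a Debian or Ubuntu guest the agent can typically be installed and started with the commands below (package and service names may vary slightly between distributions):

.. code::

   # apt-get install qemu-guest-agent
   # systemctl enable --now qemu-guest-agent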
If you need more information you can follow these links: -* https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Virtualization_Deployment_and_Administration_Guide/chap-QEMU_Guest_Agent.html -* http://wiki.libvirt.org/page/Qemu_guest_agent -* https://wiki.qemu.org/Features/GuestAgent +* `QEMU Guest Agent - libvirt `_ +* `QEMU Guest Agent - rhel `_ +* `Guest Agent Features `_ The communication channel with guest agent is enabled in the domain XML when the ``GUEST_AGENT`` feature is selected in the VM Template. +QEMU Guest Agent monitoring +------------------------------------ + You can extend the VM monitoring information with information gathered by the guest agent by setting ``:enabled`` to **true** on the file ``/var/lib/one/remotes/etc/im/kvm-probes.d/guestagent.conf``. Execute ``onehost sync --force`` afterwards. This file contains a list of ``:commands`` that will be executed when running the VM monitoring probes. The result of the execution of these commands will appear on the MONITORING section on the VM instance template. By default an example command is provided, this effectively allows to detect VM crashes @@ -526,77 +533,85 @@ And the following driver configuration files: The parameters that can be changed here are as follows: -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Parameter | Description | -+===============================================+=================================================================================================================================================================================================================+ -| ``LIBVIRT_URI`` | Connection string to libvirtd | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``QEMU_PROTOCOL`` | Protocol used for live migrations | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``SHUTDOWN_TIMEOUT`` | Seconds to wait after shutdown until timeout | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``VIRSH_RETRIES`` | Number of "virsh" command retries when required. Currently used in detach-interface and restore. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``SYNC_TIME`` | Trigger VM time synchronization from RTC on resume and after migration. QEMU guest agent must be running. Valid values: ``no`` or ``yes`` (default). 
| -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``FORCE_DESTROY`` | Force VM cancellation after shutdown timeout | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``CANCEL_NO_ACPI`` | Force VMs without ACPI enabled to be destroyed on shutdown | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``MIGRATE_OPTIONS`` | Set options for the virsh migrate command | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``CLEANUP_MEMORY_ON_START`` | Compact memory before running the VM. Values ``yes`` or ``no`` (default) | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``CLEANUP_MEMORY_ON_STOP`` | Compact memory after VM stops. Values ``yes`` (default) or ``no`` | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_CACHE`` | This parameter will set the default cache type for new attached disks. It will be used in case the attached disk does not have a specific cache method set (can be set using templates when attaching a disk). | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_DISCARD`` | Default discard option for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_IO`` | Default I/O policy for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_TOTAL_BYTES_SEC`` | Default total bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. 
| -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_TOTAL_BYTES_SEC_MAX`` | Default Maximum total bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_TOTAL_BYTES_SEC_MAX_LENGTH`` | Default Maximum length total bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_READ_BYTES_SEC`` | Default read bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_READ_BYTES_SEC_MAX`` | Default Maximum read bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_READ_BYTES_SEC_MAX_LENGTH`` | Default Maximum length read bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_WRITE_BYTES_SEC`` | Default write bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_WRITE_BYTES_SEC_MAX`` | Default Maximum write bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_WRITE_BYTES_SEC_MAX_LENGTH`` | Default Maximum length write bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. 
| -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_TOTAL_IOPS_SEC`` | Default total IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_TOTAL_IOPS_SEC_MAX`` | Default Maximum total IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_TOTAL_IOPS_SEC_MAX_LENGTH`` | Default Maximum length total IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_READ_IOPS_SEC`` | Default read IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_READ_IOPS_SEC_MAX`` | Default Maximum read IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_READ_IOPS_SEC_MAX_LENGTH`` | Default Maximum length read IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_WRITE_IOPS_SEC`` | Default write IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_WRITE_IOPS_SEC_MAX`` | Default Maximum write IOPS throttling for newly attached disks, if the attribute is missing in the template. 
| -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_WRITE_IOPS_SEC_MAX_LENGTH`` | Default Maximum length write IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_SIZE_IOPS_SEC`` | Default size of IOPS throttling for newly attached disks, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_NIC_MODEL`` | Default NIC model for newly attached NICs, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``DEFAULT_ATTACH_NIC_FILTER`` | Default NIC libvirt filter for newly attached NICs, if the attribute is missing in the template. | -+-----------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| Parameter | Description | ++===============================================+============================================================================================================================+ +| ``LIBVIRT_URI`` | Connection string to libvirtd | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``QEMU_PROTOCOL`` | Protocol used for live migrations | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``SHUTDOWN_TIMEOUT`` | Seconds to wait after shutdown until timeout | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``VIRSH_RETRIES`` | Number of "virsh" command retries when required. Currently used in detach-interface and restore. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``VIRSH_TIMEOUT`` | Default "virsh" timeout for operations which might block indefinitely. 
| ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``SYNC_TIME`` | Trigger VM time synchronization from RTC on resume and after migration. QEMU guest agent must be running. | +| | Valid values: ``no`` or ``yes`` (default). | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``FORCE_DESTROY`` | Force VM cancellation after shutdown timeout | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``CANCEL_NO_ACPI`` | Force VMs without ACPI enabled to be destroyed on shutdown | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``MIGRATE_OPTIONS`` | Set options for the virsh migrate command | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``CLEANUP_MEMORY_ON_START`` | Compact memory before running the VM. Values ``yes`` or ``no`` (default) | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``CLEANUP_MEMORY_ON_STOP`` | Compact memory after VM stops. Values ``yes`` or ``no`` (default) | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_CACHE`` | This parameter will set the default cache type for new attached disks. It will be used in case the attached disk does | +| | not have a specific cache method set (can be set using templates when attaching a disk). | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_DISCARD`` | Default discard option for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_IO`` | Default I/O policy for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_VIRTIO_BLK_QUEUES`` | The default number of queues for virtio-blk driver. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_TOTAL_BYTES_SEC`` | Default total bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. 
| ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_TOTAL_BYTES_SEC_MAX`` | Default Maximum total bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_TOTAL_BYTES_SEC_MAX_LENGTH`` | Default Maximum length total bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_READ_BYTES_SEC`` | Default read bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_READ_BYTES_SEC_MAX`` | Default Maximum read bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_READ_BYTES_SEC_MAX_LENGTH`` | Default Maximum length read bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_WRITE_BYTES_SEC`` | Default write bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_WRITE_BYTES_SEC_MAX`` | Default Maximum write bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_WRITE_BYTES_SEC_MAX_LENGTH`` | Default Maximum length write bytes/s I/O throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_TOTAL_IOPS_SEC`` | Default total IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_TOTAL_IOPS_SEC_MAX`` | Default Maximum total IOPS throttling for newly attached disks, if the attribute is missing in the template. 
| ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_TOTAL_IOPS_SEC_MAX_LENGTH`` | Default Maximum length total IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_READ_IOPS_SEC`` | Default read IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_READ_IOPS_SEC_MAX`` | Default Maximum read IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_READ_IOPS_SEC_MAX_LENGTH`` | Default Maximum length read IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_WRITE_IOPS_SEC`` | Default write IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_WRITE_IOPS_SEC_MAX`` | Default Maximum write IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_WRITE_IOPS_SEC_MAX_LENGTH`` | Default Maximum length write IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_SIZE_IOPS_SEC`` | Default size of IOPS throttling for newly attached disks, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_NIC_MODEL`` | Default NIC model for newly attached NICs, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``DEFAULT_ATTACH_NIC_FILTER`` | Default NIC libvirt filter for newly attached NICs, if the attribute is missing in the template. | ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ +| ``OVMF_NVRAM`` | Virtual Machine Firmware path to the NVRAM file. 
| ++-----------------------------------------------+----------------------------------------------------------------------------------------------------------------------------+ See the :ref:`Virtual Machine drivers reference ` for more information. diff --git a/source/open_cluster_deployment/networking_setup/bridged.rst b/source/open_cluster_deployment/networking_setup/bridged.rst index 42744f1e8..255545eed 100644 --- a/source/open_cluster_deployment/networking_setup/bridged.rst +++ b/source/open_cluster_deployment/networking_setup/bridged.rst @@ -57,3 +57,25 @@ For example, you can define a *Bridged with Security Groups* type network with t NAME = "private1" VN_MAD = "fw" + +VLAN filtering and trunking +------------------------------ + +By default the Linux bridge driver does not performs any filtering on the VLAN traffic generated by the virtual machines. You can limit the allowed VLAN to trunk in the VM ports with following attributes: + ++-------------------------------+---------------------------------------------------------------+-----------+ +| Attribute | Value | Mandatory | ++===============================+===============================================================+===========+ +| | Specify a range of VLANs that are allowed for the VM traffic. | | +| ``VLAN_TAGGED_ID`` | Comma separated list of tags, ranges are supported. | NO | ++-------------------------------+---------------------------------------------------------------+-----------+ + +For example to only allow a VM to use the VLANS IDs 100, 105, 106 and 107, add to the network: + +.. code:: + + VLAN_TAGGED_ID = "100,105-107" + +.. note:: + + The VM is responsible for tagging the VLAN traffic, no tagging is performed in the bridge diff --git a/source/open_cluster_deployment/networking_setup/vlan.rst b/source/open_cluster_deployment/networking_setup/vlan.rst index a9368e6be..01c9af36f 100644 --- a/source/open_cluster_deployment/networking_setup/vlan.rst +++ b/source/open_cluster_deployment/networking_setup/vlan.rst @@ -100,3 +100,88 @@ For example, you can define a *802.1Q Network* with the following template: VLAN_ID = 50 # Optional. If not setting VLAN_ID set AUTOMATIC_VLAN_ID = "YES" In this example, the driver will check for the existence of the ``br0`` bridge. If it doesn't exist it will be created. ``eth0`` will be tagged (``eth0.50``) and attached to ``br0`` (unless it's already attached). + +Using 802.1Q driver with Q-in-Q +================================================================================ + +Q-in-Q is not natively supported by Linux bridges, as compared to Open vSwitch, and presents some limitations: + +- The service VLAN tag (also referred as transport or outer) cannot be preserved in the VMs, +- The bridge cannot be fully configured using both VLAN tags. + +However, for the most common scenarios the 802.1Q driver can produce the double tag and filter out VLANs not included in the customer VLAN set. In this configuration the bridge works as follow: + +- Untagged traffic from the VM will be tagged using the transport VLAN. +- Tagged traffic from the VM using the CVLANS will be also tagged with the transport VLAN. +- Tagged traffic from the VM using any other VLAN ID will be discarded. + +.. note:: + + When ``CVLANS`` is not configured the bridge will add the VLAN ID tag to any traffic coming from the VM (tagged or not). There is no filtering of the VLAN IDs used by the VM. 
+ +OpenNebula Configuration +------------------------ + +There is no configuration specific for this use case, just consider the general options specified above. + +Defining a Q-in-Q Network +---------------------------------------- + +The Q-in-Q behavior is controlled by the following attributes (**please, also refer to the attributes defined above**): + ++-----------------------+----------------------------------------------------------------+----------------------------------------+ +| Attribute | Value | Mandatory | ++=======================+================================================================+========================================+ +| ``VLAN_ID`` | The VLAN ID for the transport/outer VLAN. | **YES** (unless ``AUTOMATIC_VLAN_ID``) | ++-----------------------+----------------------------------------------------------------+----------------------------------------+ +| ``CVLANS`` | The customer VLAN set. A comma separated list, supports ranges | **YES** | ++-----------------------+----------------------------------------------------------------+----------------------------------------+ + +For example, you can define an *QinQ aware Network* with the following template: + +.. code:: + + NAME = "qinq_net" + VN_MAD = "802.1Q" + PHYDEV = eth0 + VLAN_ID = 50 # Service VLAN ID + CVLANS = "101,103,110-113" # Customer VLAN ID list + +.. note:: + + ``CVLANS`` can be updated and will be dynamically reconfigured in any existing bridge + +Implementation Details +---------------------- + +When the ``CVLANS`` attribute is defined the 802.1Q perform the following configurations on the bridge: + +- Activate the VLAN filtering flag +- Installs a VLAN filter that includes all the VLANs in the ``CVLANS`` set in all VM ports in the network. In this way only tagged traffic in the customer set will be allowed in the bridge. +- All untagged traffic is associated to the transport (outer) VLAN. +- As in the other configurations, a tagged link for the transport VLAN is added to the bridge. This link is the one that will add the transport tag. + +The following example shows the main configurations performed in the bridge: + +.. 
code:: + + # - Transport / outer / S-VLAN : 100 + # - Customer / inner / C-VLAN : 200,300 + + # "Transport" link + ip link add link eth1 name eth1.100 type vlan id 100 + ip link set eth1.100 master onebr.23 + ip link set eth1.100 up + + # Bridge Configuration: + ip link set dev onebr.23 type bridge vlan_filtering 1 + + # VM port configuration (NIC 1 of VM 20, and transport link): + bridge vlan add dev one-20-1 vid 100 pvid untagged + bridge vlan add dev one-20-1 vid 200 + bridge vlan add dev one-20-1 vid 300 + + bridge vlan add dev eth1.100 vid 100 pvid untagged + bridge vlan add dev eth1.100 vid 200 + bridge vlan add dev eth1.100 vid 300 + diff --git a/source/open_cluster_deployment/storage_setup/ceph_ds.rst b/source/open_cluster_deployment/storage_setup/ceph_ds.rst index fcd008fb7..a2297235e 100644 --- a/source/open_cluster_deployment/storage_setup/ceph_ds.rst +++ b/source/open_cluster_deployment/storage_setup/ceph_ds.rst @@ -217,9 +217,9 @@ System Datastore also requires these attributes: +=================+===========================================================+===========+ | ``TYPE`` | ``SYSTEM_DS`` | **YES** | +-----------------+-----------------------------------------------------------+-----------+ -| ``TM_MAD`` | ``ceph`` to use the full Ceph mode, see below | **YES** | +| ``TM_MAD`` | ``ceph`` to use the full Ceph mode, see Ceph mode below | **YES** | | +-----------------------------------------------------------+ | -| | ``ssh`` to use local Host storage, SSH mode below | | +| | ``ssh`` to use local Host storage, see SSH mode below | | +-----------------+-----------------------------------------------------------+-----------+ | ``DISK_TYPE`` | ``RBD`` (used for volatile disks) | **NO** | +-----------------+-----------------------------------------------------------+-----------+ diff --git a/source/open_cluster_deployment/storage_setup/nas_ds.rst b/source/open_cluster_deployment/storage_setup/nas_ds.rst index 13af6d7f0..01fa113e9 100644 --- a/source/open_cluster_deployment/storage_setup/nas_ds.rst +++ b/source/open_cluster_deployment/storage_setup/nas_ds.rst @@ -4,14 +4,16 @@ NFS/NAS Datastores ================================================================================ -This storage configuration assumes that your Hosts can access and mount a shared volume located on a NAS (Network Attached Storage) server. You will use this shared volumes to store VM disk images files. The Virtual Machines will boot also from the shared volume. +This storage configuration assumes that your Hosts can access and mount a shared volume located on a NAS (Network Attached Storage) server. You will use this shared volume to store VM disk images files. The Virtual Machines will boot also from the shared volume. -The scalability of this solution is bounded to the performance of your NAS server. However you can use multiple NAS server simultaneously to improve the scalability of your OpenNebula cloud. The use of multiple NFS/NAS datastores will let you: +The scalability of this solution will be bound to the performance of your NAS server. However, you can use multiple NAS server simultaneously to improve the scalability of your OpenNebula cloud. The use of multiple NFS/NAS datastores will allow you to: * Balance I/O operations between storage servers. -* Apply different SLA policies (e.g., backup) to different VM types or users. +* Apply different SLA policies (e.g. backup) to different VM types or users. * Easily add new storage. 
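For example, two Image Datastores backed by two different NAS servers could be mounted on the Front-end and Hosts with ``/etc/fstab`` entries similar to the following (server names and datastore IDs are hypothetical; the recommended mount options are discussed below):

.. code::

   # /etc/fstab
   nas1.example.com:/export/one  /var/lib/one/datastores/100  nfs  soft,intr,rsize=32768,wsize=32768  0  0
   nas2.example.com:/export/one  /var/lib/one/datastores/101  nfs  soft,intr,rsize=32768,wsize=32768  0  0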
+Using an NFS/NAS Datastore provides a straightforward solution for implementing thin provisioning for VMs, which is enabled by default when using the **qcow2** image format.
+
 Front-end Setup
 ================================================================================
 
@@ -19,7 +21,7 @@ Simply mount the **Image** Datastore directory in the Front-end in ``/var/lib/on
 .. note:: The Front-end only needs to mount the Image Datastores and **not** the System Datastores.
 
-.. note:: **NFS volumes mount tips**. The following options are recommended to mount NFS shares:``soft, intr, rsize=32768, wsize=32768``. With the documented configuration of libvirt/kvm the image files are accessed as ``oneadmin`` user. If the files must be read by ``root``, the option ``no_root_squash`` must be added.
+.. note:: **NFS volume mount tips**. The following options are recommended to mount NFS shares: ``soft, intr, rsize=32768, wsize=32768``. With the documented configuration of libvirt/kvm, the image files can be accessed as the ``oneadmin`` user. If the files must be read by ``root``, the option ``no_root_squash`` must be added.
 
 Host Setup
 ================================================================================
 
@@ -30,7 +32,7 @@ The configuration is the same as for the Front-end above: simply mount in each H
 OpenNebula Configuration
 ================================================================================
 
-Once the Host and Front-end storage is setup, the OpenNebula configuration comprises the creation of an Image and System Datastores.
+Once the Host and Front-end storage has been set up, the OpenNebula configuration comprises the creation of the Image and System Datastores.
 
 Create System Datastore
 --------------------------------------------------------------------------------
diff --git a/source/overview/cloud_architecture_and_design/cloud_architecture_design.rst b/source/overview/cloud_architecture_and_design/cloud_architecture_design.rst
index 3f1d4297e..f650622a9 100644
--- a/source/overview/cloud_architecture_and_design/cloud_architecture_design.rst
+++ b/source/overview/cloud_architecture_and_design/cloud_architecture_design.rst
@@ -8,7 +8,7 @@ This page describes the high-level steps to design and deploy an OpenNebula clou
 To familiarize yourself with deployment and daily operations, or if you want to quickly try an Edge, Hybrid or Multi-cloud deployment, we strongly recommend you begin with the :ref:`Quick Start Guide `.
In the Quick Start, you can: - * :ref:`Install an OpenNebula Front-end ` + * :ref:`Install an OpenNebula Front-end ` * Deploy on-demand :ref:`Edge Clusters ` on remote cloud providers * Deploy :ref:`Virtual Machines ` and :ref:`Kubernetes clusters ` diff --git a/source/quick_start/deployment_basics/index.rst b/source/quick_start/deployment_basics/index.rst index 53e2d26ad..e141e42ab 100644 --- a/source/quick_start/deployment_basics/index.rst +++ b/source/quick_start/deployment_basics/index.rst @@ -10,5 +10,4 @@ Deployment Basics Overview Try OpenNebula Front-end On-prem Try OpenNebula Front-end on AWS - Try OpenNebula Front-end on VMware Try OpenNebula Hosted Front-end diff --git a/source/quick_start/deployment_basics/overview.rst b/source/quick_start/deployment_basics/overview.rst index 7c4154b24..ec09bc954 100644 --- a/source/quick_start/deployment_basics/overview.rst +++ b/source/quick_start/deployment_basics/overview.rst @@ -16,9 +16,8 @@ Each section builds on the previous one, to take you from a bare install to quic First, to install your Front-end, please select your preferred infrastructure: +- :ref:`Deploy OpenNebula Front-end On-prem `. - :ref:`Deploy OpenNebula Front-end on AWS `. -- :ref:`Deploy OpenNebula Front-end on VMware `. - :ref:`Try OpenNebula Hosted Front-end `. Then, you can move on to the next sections to quickly deploy your VMs or multi-tier services on your new cloud. - diff --git a/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst b/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst index 4f8599878..17370040c 100644 --- a/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst +++ b/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst @@ -24,7 +24,7 @@ The cloud environment installed by miniONE is mainly intended for evaluation, de .. note:: To complete this tutorial, you will need to log in to a remote Linux machine via SSH. If you follow this tutorial on a Windows machine, you will need to use an SSH client application such as `PuTTY `__. - + .. tip:: For a list of options supported by the script, run ``bash minione -h``. The script supports several types of installations (such as installing a Front-end and a KVM hypervisor node) which are not covered in this tutorial. @@ -61,15 +61,15 @@ To run the miniONE script on AWS, you will need to instantiate a virtual machine - 2616 (for the FireEdge GUI) - 5030 (for the OneGate service) -.. tip:: To quickly deploy a suitable VM, browse the AWS AMI Catalog and select ``Ubuntu Server 22.04 LTS (HVM), SSD Volume Type``: +.. tip:: To quickly deploy a suitable VM, browse the AWS AMI Catalog and select **Ubuntu Server 24.04 LTS (HVM), SSD Volume Type**: - .. image:: /images/minione-aws-ubuntu22.04.png + .. image:: /images/minione-aws-ubuntu24.04.png :align: center Below is an example of a successfully-tested configuration (though by no means the only possible one): - Region: Frankfurt -- Operating System: Ubuntu Server 22.04 LTS (HVM) +- Operating System: Ubuntu Server 24.04 LTS (HVM) - Tier: ``t2.medium`` - Open ports: 22, 80, 2616, 5030 - Storage: 80 GB SSD @@ -111,7 +111,7 @@ For example: .. warning:: Ensure you have set the appropriate permissions for the PEM file, or for security reasons SSH will refuse to connect. - + Step 1.2. Update the VM Operating System ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -120,7 +120,7 @@ Once you have logged in to the VM as user ``ubuntu``, use the ``sudo`` command t .. 
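For example, assuming the key file was saved as ``my-key.pem`` (the file name is only an illustration), you can restrict its permissions before connecting:

.. prompt::

   chmod 400 my-key.pem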
prompt:: - sudo su - + sudo -i Then, update the system to its latest software packages by running the following command: @@ -128,6 +128,35 @@ Then, update the system to its latest software packages by running the following apt update && apt upgrade +After updating, you will probably need to restart the VM to run the latest kernel. Check the output of the ``apt upgrade`` command for lines similar to the following: + +.. prompt:: + + Pending kernel upgrade! + Running kernel version: + 6.8.0-1012-aws + Diagnostics: + The currently running kernel version is not the expected kernel version 6.8.0-1014-aws. + +In this example, you need to restart the VM in order to upgrade to kernel 6.8.0-1014-aws. To restart the VM, run: + +.. prompt:: + + shutdown -r now + +You will be immediately logged out of the VM as it restarts. Wait a few moments for the VM to finish rebooting, then log in again using the same procedure as before. After logging back into the VM, you can check the running kernel version with: + +.. prompt:: + + uname -a + +For example, in this case: + +.. prompt:: + + $ uname -a + Linux ip-172-31-3-252 6.8.0-1014-aws #15-Ubuntu SMP Thu Aug 8 19:13:06 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux + Your AWS VM is now ready. In the next steps, we’ll download the miniONE script, upload it to the VM, and run the installation. Step 3: Download and install miniONE @@ -141,20 +170,20 @@ Step 3.1. Copy the miniONE script to the AWS VM After downloading miniONE, you will need to copy it to your AWS VM. - On Linux and Mac: - + If you’re on Linux, you can copy it with the ``scp`` command, providing the same user and PEM file as when logging in via SSH. For example, the command below copies the miniONE script to the ``ubuntu`` user’s home directory: .. prompt:: - + scp -i ubuntu@:~ - On Windows: You can use either of two methods: - + * The GUI tool `WinSCP `__, which allows you to copy files by drag-and-drop * The command-line tool `PuTTY Secure Copy `__, which emulates the Unix ``scp`` tool. - + For both methods you will need to provide the private key file for authentication. Step 3.2. Run the miniONE script on the AWS VM @@ -162,11 +191,25 @@ Step 3.2. Run the miniONE script on the AWS VM After copying the miniONE script to the VM, log in to the VM (as described :ref:`above `). -Use the ``sudo`` command to become the ``root`` user. +Use the ``sudo`` command to become the ``root`` user: + +.. prompt:: + + sudo -i -If necessary, use the ``cd`` command to navigate to the folder where you copied the miniONE script. For example, if you copied it to the home directory of user ``ubuntu`` run ``cd ~ubuntu``. +If necessary, use the ``cd`` command to navigate to the folder where you copied the miniONE script. For example, if you copied it to the home directory of user ``ubuntu`` run: -To install miniONE, run: +.. prompt:: + + cd ~ubuntu + +Next, ensure that the ``minione`` file has execute permissions, by running: + +.. prompt:: + + chmod +x minione + +To install miniONE, run as root: .. prompt:: @@ -177,7 +220,7 @@ The miniONE script will begin the installation, logging output to the terminal. .. prompt:: ### Report - OpenNebula 6.8 was installed + OpenNebula 6.10 was installed Sunstone is running on: http:/// FireEdge is running on: @@ -185,7 +228,7 @@ The miniONE script will begin the installation, logging output to the terminal. Use following to login: user: oneadmin password: lCmPUb5Gwk - + At this point, you have successfully installed miniONE. 
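As an optional sanity check (not part of the miniONE report itself; this assumes the default miniONE layout, where the Front-end and the KVM node share the same machine), you can confirm that the OpenNebula services answer CLI requests before opening the browser:

.. prompt::

   # Run on the miniONE host as root; the oneadmin account is created by the installation
   sudo -u oneadmin onehost list
   # The oneadmin credentials shown in the report can be read again later from:
   cat /var/lib/one/.one/one_auth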
OpenNebula services should be running, and the system should be ready for your first login. .. important:: @@ -224,7 +267,7 @@ This is the default view for cloud administrators. From this view in Sunstone, y | -Congratulations — you have deployed an OpenNebula Front-end node, which is ready to provision resources on cloud infrastructure. +Congratulations — you have deployed an OpenNebula Front-end node, which is ready to provision resources on cloud infrastructure. Next Steps diff --git a/source/quick_start/deployment_basics/try_opennebula_on_vmware.rst b/source/quick_start/deployment_basics/try_opennebula_on_vmware.rst deleted file mode 100644 index 045e2cd8a..000000000 --- a/source/quick_start/deployment_basics/try_opennebula_on_vmware.rst +++ /dev/null @@ -1,263 +0,0 @@ -.. _try_opennebula_on_vmware: - -============================================== -Deploy OpenNebula Front-end on VMware -============================================== - -In this tutorial, we’ll use **vOneCloud** to install an OpenNebula Front-end on top of an existing VMware installation. Completing this tutorial takes approximately five minutes. - -**vOneCLoud** is an Open Virtual Appliance (OVA) for VMware vSphere. It contains a complete OpenNebula Front-end, installed and configured on an AlmaLinux OS. It is free to download and use, and may be used for small-size production deployments. With **vOneCloud**, you can deploy on top of your VMware infrastructure all of the OpenNebula services needed to use, manage and run OpenNebula. - -In this tutorial, we’ll complete the following high-level steps: - - #. Verify the system requirements. - #. Download vOneCloud. - #. Deploy the vOneCloud OVA. - #. Configure the vOneCloud virtual appliance. - #. Access the OpenNebula Front-end through the Sunstone GUI. - -After finishing this tutorial, you will have deployed a complete, ready-to-use OpenNebula Front-end on top of your VMware infrastructure. You will then be able to log in via the Sunstone GUI, define hosts and deploy virtual machines. - -Brief Overview of vOneCloud -=========================== - -.. image:: /images/vonecloud_logo.png - :align: center - -vOneCloud ships with a default of 2 vCPUs, 16 GiB of RAM and 100GB of disk size. It is certified for infrastructures of the following dimensions: - -- Up to 1000 VMs in total -- Up to 100 users, with a limit of 10 users accessing the system simultaneously - -vOneCloud ships with the following components under the hood: - -+-----------------------+--------------------------------------------------------------------------------------------------+ -| **AlmaLinux** | 8 | -+-----------------------+--------------------------------------------------------------------------------------------------+ -| **OpenNebula** | |version| (:ref:`release notes `) | -+-----------------------+--------------------------------------------------------------------------------------------------+ -| **MariaDB** | Default version shipped in AlmaLinux 8 | -+-----------------------+--------------------------------------------------------------------------------------------------+ -| **Phusion Passenger** | Default version shipped in AlmaLinux 8 (used to run Sunstone) | -+-----------------------+--------------------------------------------------------------------------------------------------+ - -.. 
_accounts: - -vOneCloud ships with several pre-created user accounts, described below: - -+----------+---------------------+-------------------------+----------------------------------------------------------------------------------+ -| Account | Interface | Role | Description | -+==========+=====================+=========================+==================================================================================+ -| root | Linux | Appliance administrator | This user can log into the appliance (local login, no SSH). | -+----------+---------------------+-------------------------+----------------------------------------------------------------------------------+ -| oneadmin | Linux | Service user | Used to run all OpenNebula services. | -+----------+---------------------+-------------------------+----------------------------------------------------------------------------------+ -| oneadmin | OpenNebula Sunstone | Cloud administrator | Cloud administrator. Run any task in OpenNebula, including creating other users. | -+----------+---------------------+-------------------------+----------------------------------------------------------------------------------+ - -vOneCloud includes the Control Console, a text-based interface that offers menus for configuring the appliance. You can access the Control Console by opening the vOneCloud appliance console in vCenter. We will use the Control Console to configure vOneCloud in the steps below. - -.. note:: - - Please bear in mind that vOneCloud is shipped only for evaluation purposes. - - -Step 1. Verify the System Requirements -====================================== - -To deploy and use the vOneCloud appliance, you will need the following: - - * **vCenter 7.0** with ESX hosts grouped into clusters. - * **ESX 7.0** with at least 16 GB of free RAM and a datastore with 100 GB of free space. - * **Information** for connecting to vCenter7.0: - - IP or DNS address - - Login credentials (username and password) of an admin user - * **Web browser**: Firefox (3.5 and above) or Chrome. - - .. warning :: - - Other browsers, including Safari, are not supported and may not work well. - -Step 2. Download vOneCloud -========================== - -To download vOneCloud, you will need to complete the `download form `__. - -Download the OVA and save it to a convenient location. - -Step 3. Deploy the vOneCloud OVA -==================================== - -Log in to your vCenter installation. Determine which cluster to deploy vOneCloud on. - -In the left-hand pane, right-click the desired cluster, then click **Deploy OVF Template**. - -.. image:: /images/6.10-vOneCloud-download-deploy-001.png - :align: center - :scale: 70% - -| - -In the **Deploy OVF Template** dialog box, select **Local file**, then click **Browse**. Search for and select the vOneCloud appliance OVA that you downloaded. - -Click **Next**. In the next few screens, follow the vCenter wizard to deploy vOneCloud as you would any other OVA. You will need to select the compute resource to deploy on, the datastore where the OVA will be copied, and the network that the virtual appliance will use. - -.. note:: - - The datastore used for the vOneCloud appliance needs to have at least 100 GB of available space. - -The final screen displays a summary of deployment information. Click **Finish**. - -Wait for the deployment to complete. This should not take more than a few moments. - -After the VM has finished booting, the Web Console should display the OpenNebula Control Console: - -.. 
image:: /images/control-console.png - :align: center - :scale: 60% - -| - -At this point, the vOneCloud virtual appliance is up and running. - -.. note:: - - If instead of the Control Console you see a normal Linux tty login screen: - - .. image:: /images/control-console-wrong.png - :align: center - :scale: 60% - - then the virtual appliance is displaying the wrong tty terminal. The vOneCloud Control Console is on tty1. To access tty1, press ``Ctrl+Alt+F1``. - -In the next steps we’ll configure the vOneCloud appliance. - -Step 4. Configure vOneCloud -=========================== - -We’ll configure the following: - - * Network connection for the vOneCloud appliance - * OpenNebula user ``oneadmin`` password - * Linux ``root`` password - * IP address or FQDN for the public endpoint of Sunstone - -Step 4.1. Configure the Network -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The vOneCloud appliance is configured to connect automatically via DHCP. If you are using DHCP, you can skip to the :ref:`next step `. If using a manual network configuration, read on. - -To configure the network, in the Control Console press ``1``. Then, follow these steps: - - #. Select **Edit a connection**. - #. Select **System eth0**. - #. Select **IPv4 Configuration**, then **Show**. - #. Change the configuration from ``Automatic`` to ``Manual``. - #. Fill in the required information for manual configuration: - - **Addresses**: IPv4 address in /24 notation, e.g. ``10.0.1.249/24``. To add more addresses, use the **Add** item under the **Addresses** field. - - **Gateway**: IP address of the Gateway for the appliance. - - **DNS servers**: IP address(es) of one or more DNS servers. - - **Search domain** (optional): Search domains for DNS. - -Below is an example of a static network configuration on the available network interface, ``eth0``. The interface is set on the 10.0.1.x Class C network, the gateway is at 10.0.1.1 and the DNS server at 8.8.8.8. - -.. image:: /images/network-conf-example.png - :align: center - :scale: 60% - -| - -After filling in the information, select **OK** to exit the dialog. - -In the next screen, select **Activate a connection** and ensure that **System eth0** is activated. Then, select **Set system hostname** and type a hostname. - - -.. _Step 4.2: - -Step 4.2. Configure the OpenNebula User Password -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In the Control Console, press ``2`` to configure the password for the OpenNebula user, ``oneadmin``. - -Enter the desired password. You will use this password to log into the Sunstone GUI in the last step of this tutorial. - -.. important:: - - This password is for the OpenNebula system user account, not to be confused with the Linux user ``oneadmin``. - -.. _Step 4.3: - -Step 4.3. Configure the Linux ``root`` User Password -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In the Control Console, press ``3`` to set the password for the Linux OS ``root`` user. This is your master password for the virtual appliance. - -.. warning:: - - This password is not often used, so it’s easy to forget. As in all Unix-like systems, there is no way to recover a lost ``root`` password, so ensure it is stored in a safe place. - -.. _Step 4.4: - -Step 4.4. Configure a Public IP for vOneCloud -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In the Control Console, press ``4`` to select the FQDN or public IP address that will serve as the endpoint for accessing the Sunstone GUI. 
- -At this point, the vOneCloud appliance is configured and ready to be accessed through the Sunstone GUI. - -.. important:: - - Bear in mind that in this evaluation version, the FireEdge server for the Sunstone UI is listening on unencrypted HTTP over a public IP address. - -Step 5. Access the OpenNebula Front-end through the Sunstone GUI -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Open a web browser (Firefox or Chrome) and enter the public IP or FQDN you defined as the Sunstone endpoint in :ref:`Step 4.4 `. For example, ``http://10.0.1.176``. - -You should be greeted by the Sunstone login screen: - -.. image:: /images/sunstone_login_dark.png - :align: center - :scale: 50% - -| - -In the **Username** field, type ``oneadmin``. In the **Password** field, enter the password you defined for the OpenNebula user in :ref:`Step 4.2 `. - -Sunstone should display the Dashboard: - -.. image:: /images/6.10-sunstone_dashboard.png - :align: center - :scale: 50% - -| - -Congratulations — you have deployed and fully configured an OpenNebula Front-end on your VMware infrastructure. At this point, you are ready to add computing clusters to OpenNebula and launch virtual machines. - -.. note:: - - If you get an error message from Sunstone when attempting to log in, it means the public endpoint for Sunstone is not properly configured. - - .. image:: /images/sunstone-fe-error.png - :align: center - :scale: 70% - - Return to the Control Console and configure a public IP or FQDN (see :ref:`Step 4.4 ` above). - -.. _advanced_login: - -Accessing the Linux CLI in the Virtual Appliance -================================================ - -If wish to access the Linux OS running on the virtual appliance, you can do so in one of two ways: - - * Using SSH: - - Connect to vOneCloud’s public IP address or FQDN. For example: ``ssh root@10.0.1.176``. - (If connecting from Windows, you can use a program such as `PuTTY `__ or `WinSCP `__.) - * Using vCenter: - - When connected to the Control Console, change to tty2 by pressing ``Ctrl+Alt+F2``. Then, log in to the system as ``root`` with the password you defined in :ref:`Step 4.3 `. - -Next Steps -========== - -Want to try out automatic resource provisioning on public infrastructure? Follow the :ref:`Operations Guide ` to deploy an Edge Cluster on AWS — in under 10 minutes — and add computing power to your OpenNebula cloud. diff --git a/source/quick_start/operation_basics/provisioning_edge_cluster.rst b/source/quick_start/operation_basics/provisioning_edge_cluster.rst index 308caa875..1a4efde51 100644 --- a/source/quick_start/operation_basics/provisioning_edge_cluster.rst +++ b/source/quick_start/operation_basics/provisioning_edge_cluster.rst @@ -224,7 +224,7 @@ First, log in to the Front-end node. .. tip:: If you installed the Front-end by following the :doc:`Quickstart with miniONE on AWS <../deployment_basics/try_opennebula_on_kvm>` tutorial, to log into the Front-end you will need to use the key stored in the PEM file that you obtained from AWS. For details, see :ref:`minione_log_in_to_ec2` in that tutorial. -On the Front-end node, use the ``oneadmin`` command to perform the following actions: +On the Front-end node, use the ``oneprovision`` command to perform the following actions: List clusters in the provision: ``oneprovision cluster list``. 
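For illustration, the listing step mentioned above might look as follows when run as ``oneadmin`` on the Front-end (a sketch; it assumes the provision created earlier in this guide exists, and the ``oneprovision list`` subcommand for listing the provisions themselves is an assumption):

.. prompt:: bash $ auto

   # List the provisions known to this Front-end (assumed subcommand)
   oneprovision list
   # List the clusters that belong to the provision, as referenced above
   oneprovision cluster list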
diff --git a/source/quick_start/usage_basics/running_kubernetes_clusters.rst b/source/quick_start/usage_basics/running_kubernetes_clusters.rst index 31758eac4..e792a8725 100644 --- a/source/quick_start/usage_basics/running_kubernetes_clusters.rst +++ b/source/quick_start/usage_basics/running_kubernetes_clusters.rst @@ -39,7 +39,11 @@ Follow these steps: :align: center :scale: 50% - #. Select the **system** datastore for the AWS cluster. (If you began this Quick Start Guide on a clean install, it will probably display ID ``101``.) + #. Select the **system** datastore for the AWS cluster. (If you began this Quick Start Guide on a clean install, it will probably display ID ``100``.) + + .. image:: /images/sunstone-aws_edge_cluster_sys_ds.png + :align: center + #. Sunstone will display the **Info** panel for the datastore. Scroll down to the **Attributes** section and find the ``REPLICA_HOST`` attribute. Hover your mouse to the right, to display the **Copy**/**Edit**/**Delete** icons |icon3| for the attribute value: .. image:: /images/sunstone-aws_cluster_replica_host.png @@ -49,6 +53,7 @@ Follow these steps: | #. Click the **Delete** icon |icon4|. + #. When Sunstone requests to confirm the action, click **Yes**. You have deleted the ``REPLICA_HOST`` parameter from the datastore. In the next step we’ll download the OneKE appliance. @@ -61,8 +66,6 @@ The `OpenNebula Public Marketplace `__ is a r The Kubernetes cluster is packaged in a multi-VM service appliance listed as **Service OneKE **. To download it, follow the same steps as when downloading the WordPress VM: -Log in to Sunstone as user ``oneadmin``. - Open the left-hand pane, then select **Storage** -> **Apps**. Sunstone will display the **Apps** screen, showing the first page of apps that are available for download. .. image:: /images/sunstone-apps_list.png @@ -81,7 +84,13 @@ In the search field at the top, type ``oneke`` to filter by name. Then, select * Click the **Import into Datastore** |icon1| icon. -As with the WordPress appliance, Sunstone displays the **Download App to OpenNebula** wizard. In the first screen of the wizard, click **Next**. In the second screen you will need to select a datastore for the appliance. Select the **aws-edge-cluster-image** datastore. +As with the WordPress appliance, Sunstone displays the **Download App to OpenNebula** wizard. In the first screen of the wizard, click **Next**. + +.. image:: /images/sunstone-aws_cluster_download_oneke.png + :align: center + :scale: 60% + +In the second screen you will need to select a datastore for the appliance. Select the **aws-edge-cluster-image** datastore. |kubernetes-qs-marketplace-datastore| @@ -118,6 +127,8 @@ Sunstone displays the **Address Range** dialog box. Here you can define an addre |kubernetes-aws-private-network-range| +Click **Accept**. + Lastly, you will need to add a DNS server for the network. Select the **Context** tab, then the **DNS** input field. Type the address for the DNS server, such as ``8.8.8.8`` or ``1.1.1.1``. |kubernetes-aws-dns| @@ -187,12 +198,36 @@ To expose an example application on the public network, you will need to enable |kubernetes-qs-enable-ingress| +Enable Additional Network Options +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Click **3** at the bottom of the page to go to the third **User Inputs** screen. + +In this screen, activate the following toggle switches: + + * Enable DNS recursor + * Enable NAT + * Enable Router + + .. 
image:: /images/sunstone-k8s_enable_netw_params.png + :align: center + +| + + Click **Next** to go to the next screen, **Network**. + Select the Public and Private Networks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Kubernetes cluster needs access to the private and the public network defined for the Edge Cluster. First we’ll select the public network. Check that the **Network ID** drop-down menu displays ``Public``, then select the **metal-aws-edge-cluster-public** network. +The Kubernetes cluster needs access to the private and the public network defined for the Edge Cluster. First we’ll select the public network. + +Set the **Network ID** drop-down menu to ``Public``, and the **Network Type** drop-down menu to ``Existing``. + +.. image:: /images/sunstone_kubernetes_netw_dropdowns.png + +With **Network ID** set to ``Public``, select the **metal-aws-edge-cluster-public** network. |kubernetes-qs-pick-networks-public| @@ -226,12 +261,11 @@ To verify that the VMs for the cluster were correctly deployed, you can use the .. prompt:: bash $ auto [oneadmin@FN]$ onevm list - ID USER GROUP NAME STAT CPU MEM HOST TIME - 5 oneadmin oneadmin storage_0_(service_3) runn 2 3G 0d 00h05 - 4 oneadmin oneadmin worker_0_(service_3) runn 2 3G 0d 00h05 - 3 oneadmin oneadmin master_0_(service_3) runn 2 3G 0d 00h05 - 2 oneadmin oneadmin vnf_0_(service_3) runn 1 2G 0d 00h06 - 1 oneadmin oneadmin Service WordPress - KVM-1 runn 1 2G 54.235.30.169 0d 00h21 + ID USER GROUP NAME STAT CPU MEM HOST TIME + 3 oneadmin oneadmin worker_0_(service_3) runn 2 3G 0d 00h31 + 2 oneadmin oneadmin master_0_(service_3) runn 2 3G 0d 00h31 + 1 oneadmin oneadmin vnf_0_(service_3) runn 1 512M 0d 00h31 + 0 oneadmin oneadmin Service WordPress - KVM-0 runn 1 768M 0d 01h22 At this point you have successfully instantiated the Kubernetes cluster. Before deploying an application, you need to find out the **public** IP address of the VNF node, since we will use it later to connect to the master Kubernetes node. @@ -240,15 +274,18 @@ At this point you have successfully instantiated the Kubernetes cluster. Before Check the IP Address for the VNF Node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To see the IP in Sunstone, go to **Instances** -> **VMs**, then check the **IP** column for the VNF VM. +To check the VNF node IP in Sunstone, in the left-hand pane go to **Instances** -> **VMs**, then check the information displayed under **vnf_0_(service_)**. The IP is displayed on the right, highlighted in the image below (note that all public IPs have been blurred in the image): + + .. image:: /images/sunstone-aws_k8s_vms_list.png + :align: center Alternatively, to check on the command line, log in to the Front-end and run: .. prompt:: bash $ auto - [oneadmin@FN]$ onevm show -j |jq -r .VM.TEMPLATE.NIC[0].EXTERNAL_IP + onevm show -j |jq -r .VM.TEMPLATE.NIC[0].EXTERNAL_IP -Replace ```` with the ID of the VNF VM as listed by the ``onevm list`` command (ID ``2`` in the example above). +Replace ```` with the ID of the VNF VM as listed by the ``onevm list`` command (ID ``1`` in the example above). If you do not see all VMs listed, or if the OneKE Service is stuck in ``DEPLOYING``, see :ref:`Known Issues ` below. @@ -277,17 +314,17 @@ To deploy an application, we will first connect to the master Kubernetes node vi For connecting to the master Kubernetes node, you need to know the public address (AWS elastic IP) of the VNF node, as described :ref:`above `.
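Written out with a concrete ID (``1``, the VNF VM in the example listing above), the command-line check just described would be:

.. prompt:: bash $ auto

   # Print the public (elastic) IP of the VNF VM with ID 1
   onevm show 1 -j | jq -r .VM.TEMPLATE.NIC[0].EXTERNAL_IP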
-Once you know the correct IP, from the Front-end node connect to the master Kubernetes node with this command: +Once you know the correct IP, from the Front-end node connect to the master Kubernetes node with the below command (replace “1.2.3.4” with the public IP address of the VNF node): .. prompt:: bash $ auto - $ ssh -A -J root@ root@172.20.0.2 + $ ssh -A -J root@1.2.3.4 root@172.20.0.2 In this example, ``172.20.0.2`` is the private IP address of the Kubernetes master node (the second address in the private network). .. tip:: - If you don't use ``ssh-agent`` then you may skip the ``-A`` flag in the above command. You will need to copy your *private* ssh key (used to connect to VNF) into the VNF node itself, at the location ``~/.ssh/id_rsa``. Make sure that the file permissions are correct, i.e. ``0600`` (or ``u=rw,go=``). For example: + If you don’t use ``ssh-agent`` then you may skip the ``-A`` flag in the above command. You will need to copy your *private* ssh key (used to connect to VNF) into the VNF node itself, at the location ``~/.ssh/id_rsa``. Make sure that the file permissions are correct, i.e. ``0600`` (or ``u=rw,go=``). For example: .. prompt:: bash $ auto @@ -342,7 +379,8 @@ On the Kubernetes master node, create a file called ``expose-nginx.yaml`` with t port: 80 targetPort: 80 --- - apiVersion: traefik.containo.us/v1alpha1 + # In Traefik < 3.0.0 it used to be "apiVersion: traefik.containo.us/v1alpha1". + apiVersion: traefik.io/v1alpha1 kind: IngressRoute metadata: name: nginx @@ -386,7 +424,7 @@ OneFlow Service is Stuck in ``DEPLOYING`` An error in network configuration, or any major failure (such as network timeouts or performance problems) can cause the OneKE service to lock up due to a communications outage between it and the Front-end node. The OneKE service will lock if *any* of the VMs belonging to it does not report ``READY=YES`` to OneGate within the default time. -If one or more of the VMs in the Kubernetes cluster never leave the ``DEPLOYING`` state, you can troubleshoot OneFlow communications by inspecting the file ``/var/log/oneflow.log`` on the Front-end node. Look for a line like the following: +If one or more of the VMs in the Kubernetes cluster never leave the ``DEPLOYING`` state, you can troubleshoot OneFlow communications by inspecting the file ``/var/log/one/oneflow.log`` on the Front-end node. Look for a line like the following: .. code-block:: text @@ -402,7 +440,7 @@ To recreate the VM instance, you must first terminate the OneKE service. A servi .. prompt:: bash $ auto - [oneadmin@FN]$ oneflow recover --delete + oneflow recover --delete Then, re-instantiate the service from the Sunstone UI: in the left-hand pane, **Service Templates** -> **OneKE 1.29**, then click the **Instantiate** icon. @@ -411,7 +449,7 @@ Lack of Connectivity to the OneGate Server Another possible cause for VMs in the Kubernetes cluster failing to run is lack of contact between the VNF node in the cluster and the OneGate server on the Front-end. -As described in :ref:`Quick Start Using miniONE on AWS `, the AWS instance where the Front-end is running needs to allow incoming connections for port 5030. If you do not want to open the port for all addresses, check the **public** IP address of the VNF node (the AWS Elastic IP, see :ref:`above `), and create an inbound rule in the AWS security groups that IP. +As described in :ref:`Quick Start Using miniONE on AWS `, the AWS instance where the Front-end is running must allow incoming connections for port 5030. 
If you do not want to open the port for all addresses, check the **public** IP address of the VNF node (the AWS Elastic IP, see :ref:`above `), and create an inbound rule in the AWS security groups for that IP. In cases of lack of connectivity with the OneGate server, the ``/var/log/one/oneflow.log`` file on the Front-end will display messages like the following: @@ -422,43 +460,121 @@ In cases of lack of connectivity with the OneGate server, the ``/var/log/one/one In this scenario only the VNF node is successfully deployed, but no Kubernetes nodes. -To troubleshoot, log in to the VNF node via SSH. Then, check if the VNF node is able to contact the OneGate server on the Front-end node, by running this command as root: +To troubleshoot, follow these steps: -.. prompt:: bash $ auto + #. Find out the IP address of the VNF node, as described :ref:`above `. + #. Log in to the VNF node via ssh as root. + #. Check if the VNF node is able to contact the OneGate server on the Front-end node, by running this command: - [root@VNF]$ onegate vm show + .. prompt:: bash $ auto -A successful response should look like: + onegate vm show -.. code-block:: text + A successful response should look like: - [root@VNF]$ onegate vm show - VM 0 - NAME : vnf_0_(service_3) + .. code-block:: text -And a failure gives a timeout message: + [root@VNF]$ onegate vm show + VM 0 + NAME : vnf_0_(service_3) -.. code-block:: text + And a failure gives a timeout message: - [root@VNF]$ onegate vm show - Timeout while connected to server (Failed to open TCP connection to :5030 (execution expired)). - Server: :5030 + .. code-block:: text -Possible causes -++++++++++++++++ + [root@VNF]$ onegate vm show + Timeout while connected to server (Failed to open TCP connection to :5030 (execution expired)). + Server: :5030 + + In this case, the VNF node cannot communicate with the OneGate service on the Front-end node. Possible causes include: -**Wrong Front-end node AWS IP**: The VNF node may be trying to connect to the OneGate server on the wrong IP address. In the VNF node, the IP address for the Front-end node is defined by the value of ``ONEGATE_ENDPOINT``, in the scripts found in the ``/run/one-context*`` directories. You can check the value with: + * **Wrong Front-end node for the AWS IP**: The VNF node may be trying to connect to the OneGate server on the wrong IP address. In the VNF node, the IP address for the Front-end node is defined by the value of ``ONEGATE_ENDPOINT``, in the scripts found in the ``/run/one-context`` directory. You can check the value with: -.. code-block:: text + .. code-block:: text - [root@VNF]$ grep ONEGATE -r /run/one-context* + grep -r ONEGATE /run/one-context* -If the value of ``ONEGATE_ENDPOINT`` does not match the IP address where OneGate is listening on the Front-end node, edit the parameter with the correct IP address, then terminate the service from the Front-end (see :ref:`above `) and re-deploy. + If the value of ``ONEGATE_ENDPOINT`` does not match the IP address where OneGate is listening on the Front-end node, edit the parameter with the correct IP address. Then, terminate the OneKE service from the Front-end (see :ref:`above `) and re-deploy. -**Filtered incoming connections**: On the Front-end node, the OneGate server listens on port 5030, so you must ensure that this port accepts incoming connections. If necessary, create an inbound rule in the AWS security groups for the elastic IP of the VNF node. 
+ * **Filtered incoming connections**: On the Front-end node, the OneGate server listens on port 5030, so you must ensure that this port accepts incoming connections. If necessary, create an inbound rule in the AWS security groups for the elastic IP of the VNF node. .. |icon1| image:: /images/icons/sunstone/import_into_datastore.png .. |icon2| image:: /images/icons/sunstone/instantiate.png .. |icon3| image:: /images/icons/sunstone/parameter_manipulation_icons.png .. |icon4| image:: /images/icons/sunstone/trash.png .. |icon5| image:: /images/icons/sunstone/VNC.png + +One or more VMs Fail to Report Ready +++++++++++++++++++++++++++++++++++++++ + +Another possible reason for the OneKE Service failing to leave the ``DEPLOYING`` state is that a temporary network glitch or other variation in performance prevented one or more of the VMs in the service from reporting ``READY`` to the OneGate service. In this case, you may see all of the VMs in the service up and running while the OneKE service remains stuck in ``DEPLOYING``. + +For example, on the Front-end the output of ``onevm list`` shows all VMs running: + +.. prompt:: + + onevm list + ID USER GROUP NAME STAT CPU MEM HOST TIME + 3 oneadmin oneadmin worker_0_(service_3) runn 2 3G 0d 01h02 + 2 oneadmin oneadmin master_0_(service_3) runn 2 3G 0d 01h02 + 1 oneadmin oneadmin vnf_0_(service_3) runn 1 512M 0d 01h03 + 0 oneadmin oneadmin Service WordPress - KVM-0 runn 1 768M 0d 01h53 + +Yet ``oneflow list`` shows: + +.. prompt:: + + ID USER GROUP NAME STARTTIME STAT + 3 oneadmin oneadmin OneKE 1.29 08/30 12:30:07 DEPLOYING + +In this case, you can manually instruct the VMs to report ``READY`` to the OneGate server. Follow these steps: + + #. From the Front-end node, log in to the VNF node by running: + + .. prompt:: + + ssh root@ + + (To find out the IP address of the VNF node, see :ref:`above `.) + + #. For each VM in the OneKE service, run the following command: + + .. prompt:: + + onegate vm update --data "READY=YES" + + For example, ``onegate vm update 2 --data "READY=YES"``. + + Then, you can check the status of the service with ``onegate service show``: + + .. prompt:: + + onegate service show + SERVICE 3 + NAME : OneKE 1.29 + STATE : RUNNING + + ROLE vnf + VM 1 + NAME : vnf_0_(service_3) + + ROLE master + VM 2 + NAME : master_0_(service_3) + + ROLE worker + VM 3 + NAME : worker_0_(service_3) + + ROLE storage + + #. On the Front-end, run ``oneflow list`` again to verify that the service reports ``RUNNING``: + + .. prompt:: + + [oneadmin@FN]$ oneflow list + ID USER GROUP NAME STARTTIME STAT + 3 oneadmin oneadmin OneKE 1.29 08/30 12:35:21 RUNNING + + +
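If several VMs need to be marked at once, the per-VM ``onegate`` command above can be wrapped in a small shell loop on the VNF node (a sketch; the IDs ``1 2 3`` are the example IDs from the listing above and must be replaced with the real IDs reported by ``onevm list``):

.. prompt::

   # Mark each VM of the OneKE service as READY via OneGate
   for id in 1 2 3; do
       onegate vm update "$id" --data "READY=YES"
   done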