diff --git a/publish/config.yaml b/publish/config.yaml index 354198fd96..ee98f5af03 100644 --- a/publish/config.yaml +++ b/publish/config.yaml @@ -1,5 +1,6 @@ mapping: - master: '6.10' + master: '6.99' + one-6.10: '6.10' one-6.8: '6.8' one-6.6: '6.6' one-6.4: '6.4' diff --git a/publish/publish.rb b/publish/publish.rb index baedfa539c..5898cb0ea0 100644 --- a/publish/publish.rb +++ b/publish/publish.rb @@ -90,12 +90,21 @@ def error(rc) " echo \"#{branch_symlink_path} is not a symlink\" >&2'") error(rc) +# cleanup old versions + +rc = run("ssh #{ssh_op} #{host} 'find #{host_path} -maxdepth 1 -type d -name \"#{branch_dir}.*\" -printf \"%f\\n\" | sort'") +error(rc) + builds = rc[0].split -exit(0) if builds.length <= 3 +puts "Total builds found: #{builds.length}" +puts "Builds: #{builds.join("\n")}" -builds = builds[0..-3] # Keep two latest ones +exit(0) if builds.length <= 2 -builds.each do |build| - run("ssh #{ssh_op} #{host} rm -rf #{host_path}/#{build}") +# List all builds except the two most recent +old_builds = builds[0...-2] +old_builds.each do |build| + puts "Removing old build folder: #{build}" + run("ssh #{ssh_op} #{host} rm -rf #{host_path}/#{build}") end diff --git a/source/conf.py b/source/conf.py index 6812cb3e8d..057c7764d2 100644 --- a/source/conf.py +++ b/source/conf.py @@ -88,7 +88,7 @@ # The short X.Y version. version = '6.10' # The full version, including alpha/beta/rc tags. -release = '6.10.4' +release = '6.10.5' # The context packages released version context_release = '6.10.0' diff --git a/source/ext/spellchecking/wordlists/opennebula.txt b/source/ext/spellchecking/wordlists/opennebula.txt index 95ed100c84..461389837c 100644 --- a/source/ext/spellchecking/wordlists/opennebula.txt +++ b/source/ext/spellchecking/wordlists/opennebula.txt @@ -76,6 +76,8 @@ Iothread JWT Javascript Journald +jsmith +JSmith Keepalive Kerberos Kube @@ -474,6 +476,7 @@ monitorization moref morefs mountpoints +msi multicast multicluster multitenant @@ -486,6 +489,7 @@ natively ncurses netaddr netmask +netplan networkd nginx nic diff --git a/source/images/aws_cluster_images_datastore.png b/source/images/aws_cluster_images_datastore.png index b470ea0134..a58589db06 100644 Binary files a/source/images/aws_cluster_images_datastore.png and b/source/images/aws_cluster_images_datastore.png differ diff --git a/source/images/minione-aws-ubuntu24.04.png b/source/images/minione-aws-ubuntu24.04.png new file mode 100644 index 0000000000..a521e3edc2 Binary files /dev/null and b/source/images/minione-aws-ubuntu24.04.png differ diff --git a/source/images/sunstone-aws_cluster_download_oneke.png b/source/images/sunstone-aws_cluster_download_oneke.png new file mode 100644 index 0000000000..6d0a4ed17d Binary files /dev/null and b/source/images/sunstone-aws_cluster_download_oneke.png differ diff --git a/source/images/sunstone-aws_cluster_replica_host.png b/source/images/sunstone-aws_cluster_replica_host.png index 73b6b7899a..756b3506f8 100644 Binary files a/source/images/sunstone-aws_cluster_replica_host.png and b/source/images/sunstone-aws_cluster_replica_host.png differ diff --git a/source/images/sunstone-aws_edge_cluster_deploying.png b/source/images/sunstone-aws_edge_cluster_deploying.png new file mode 100644 index 0000000000..e3b65bc6e6 Binary files /dev/null and b/source/images/sunstone-aws_edge_cluster_deploying.png differ diff --git a/source/images/sunstone-aws_edge_cluster_sys_ds.png b/source/images/sunstone-aws_edge_cluster_sys_ds.png new file mode 100644 index 0000000000..ac3a804059 Binary files /dev/null 
and b/source/images/sunstone-aws_edge_cluster_sys_ds.png differ
diff --git a/source/images/sunstone-aws_k8s_vms_list.png b/source/images/sunstone-aws_k8s_vms_list.png
new file mode 100644
index 0000000000..10bd5fb49f
Binary files /dev/null and b/source/images/sunstone-aws_k8s_vms_list.png differ
diff --git a/source/images/sunstone-aws_kubernetes_vnf_ip.png b/source/images/sunstone-aws_kubernetes_vnf_ip.png
new file mode 100644
index 0000000000..1af726b3ec
Binary files /dev/null and b/source/images/sunstone-aws_kubernetes_vnf_ip.png differ
diff --git a/source/images/sunstone-k8s_enable_netw_params.png b/source/images/sunstone-k8s_enable_netw_params.png
new file mode 100644
index 0000000000..5bc600b1c1
Binary files /dev/null and b/source/images/sunstone-k8s_enable_netw_params.png differ
diff --git a/source/images/sunstone_kubernetes_netw_dropdowns.png b/source/images/sunstone_kubernetes_netw_dropdowns.png
new file mode 100644
index 0000000000..9daa2d5393
Binary files /dev/null and b/source/images/sunstone_kubernetes_netw_dropdowns.png differ
diff --git a/source/installation_and_configuration/authentication/ldap.rst b/source/installation_and_configuration/authentication/ldap.rst
index 4f7514c1c9..1a44b7dc4e 100644
--- a/source/installation_and_configuration/authentication/ldap.rst
+++ b/source/installation_and_configuration/authentication/ldap.rst
@@ -23,6 +23,23 @@ This authentication mechanism is enabled by default. If it doesn't work, make su
         AUTHN = "ssh,x509,ldap,server_cipher,server_x509"
     ]
 
+If you want to enable auto-creation of LDAP users in OpenNebula, you need to add the ``default`` method to the ``AUTHN`` attribute of the ``AUTH_MAD`` section, as shown below:
+
+.. code-block:: bash
+
+   AUTH_MAD = [
+       EXECUTABLE = "one_auth_mad",
+       AUTHN = "default,ssh,x509,ldap,server_cipher,server_x509"
+   ]
+
+and create a symlink as the ``root`` user on the OpenNebula Front-end node:
+
+.. code-block:: bash
+
+   ln -s /var/lib/one/remotes/auth/ldap /var/lib/one/remotes/auth/default
+
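+For the change in ``AUTH_MAD`` to take effect, restart the OpenNebula daemon (shown here for systemd-based installations; adjust to your init system if needed):
+
+.. code-block:: bash
+
+   systemctl restart opennebula
+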
 Authentication driver ``ldap`` can be customized in ``/etc/one/auth/ldap_auth.conf``. This is the default configuration:
 
 .. code-block:: yaml
@@ -188,7 +205,7 @@ To enable ``ldap`` authentication the described parameters should be configured.
 User Management
 ===============
 
-Using the LDAP authentication module, the administrator doesn't need to create users with the ``oneuser`` command, as this will be done automatically.
+Enabling auto-creation of LDAP users in OpenNebula, as described above, eliminates the need for the administrator to create users manually with the ``oneuser`` command.
 
 Users can store their credentials into a file referenced by environment variable ``$ONE_AUTH`` (usually ``$HOME/.one/one_auth``) in this fashion:
 
@@ -228,6 +245,8 @@ Users can easily create escaped ``$ONE_AUTH`` tokens with the command ``oneuser
 
 The output of this command should be put in the ``$ONE_AUTH`` file.
 
+
+
 .. _active_directory:
 
 Active Directory
@@ -325,3 +344,14 @@ And you want users whose login ends with ``a.example.com`` to be searched in ``l
 
    :match_user_regex:
        "^(.*)@a.example.com$": ldap-a.example.com
        "^(.*)@b.example.com$": ldap-b.example.com
+
+DB Backend and Case Sensitivity
+===============================
+
+Since LDAP is, in general, case insensitive, there can be some oddities in the way OpenNebula manages LDAP users, and the exact behavior depends on the DB backend in use.
+
+If you use SQLite or PostgreSQL, usernames are always case-sensitive: any further login with the same user but in a different casing will cause an authentication failure.
+
+However, when the MySQL database is used (with the default configuration), usernames are case insensitive. Therefore, you can still log in with any casing of the username, such as jsmith, JSmith, JSMITH, etc.
+
+In any case, LDAP users are created in the OpenNebula DB in lower case. This is mainly to prevent multiple users (differing only in case) from bypassing quotas.
diff --git a/source/installation_and_configuration/opennebula_services/fireedge.rst b/source/installation_and_configuration/opennebula_services/fireedge.rst
index e572eadbc9..99ecb890f4 100644
--- a/source/installation_and_configuration/opennebula_services/fireedge.rst
+++ b/source/installation_and_configuration/opennebula_services/fireedge.rst
@@ -215,7 +215,7 @@ The following example shows how you can change the logo to a generic linux one (
 Configure DataTables
 --------------------------------------------------------------------------------
 
-You can change the style of the rows depending on your preferences. in case they are changed in the fireedge-server.conf file. this change will be priority. and it will adjust the view to all users. 
+You can change the style of the rows depending on your preferences. If the styles are changed in the ``fireedge-server.conf`` file, that change takes priority and adjusts the view for all users.
 
 |fireedge_sunstone_list_datatable|
diff --git a/source/installation_and_configuration/whmcs_tenants/configure.rst b/source/installation_and_configuration/whmcs_tenants/configure.rst
index fcf56ba2b8..2a327f4ad8 100644
--- a/source/installation_and_configuration/whmcs_tenants/configure.rst
+++ b/source/installation_and_configuration/whmcs_tenants/configure.rst
@@ -4,6 +4,8 @@ WHMCS Tenants Module
 Install/Update
 ===================================
 
+.. warning:: You must use PHP 7.4; PHP 8.x currently causes an error when creating the user.
+
 The install and update process are essentially identical. The Module files can be found in */usr/share/one/whmcs* after you have installed the *opennebula-whmcs-tenants* package via your package manager. You will just need to merge the *modules* directory to the main WHMCS directory on the server hosting WHMCS. When updating the module just copy the files on top of the existing files and overwrite them. An example command for copying the files:
 
 .. code-block:: bash
@@ -11,7 +13,6 @@ The install and update process are essentially identical. The Module files can b
 
    cp -rf /usr/share/one/whmcs/modules /path/to/web/root/whmcs/.
 
-
 .. note:: Make sure you download the updated package from the EE repository before doing either an install or an update.
 
 ==================================
diff --git a/source/integration_and_development/system_interfaces/java.rst b/source/integration_and_development/system_interfaces/java.rst
index 61f916e76c..dfc4350c44 100644
--- a/source/integration_and_development/system_interfaces/java.rst
+++ b/source/integration_and_development/system_interfaces/java.rst
@@ -9,14 +9,45 @@ This page contains the OpenNebula Cloud API Specification for Java. It has been
 Download
 ================================================================================
 
-You can download the ``.jar`` file compiled using Java 1.8, the required libraries, and the javadoc packaged in a tar.gz file `following this link `__ in the OpenNebula version you have installed. 
+You can download the ``.jar`` file compiled using Java 11, the required libraries, and the javadoc packaged in a tar.gz file `following this link `__ in the OpenNebula version you have installed.
 
 You can also consult the `javadoc online `__.
 
+Compilation
+================================================================================
+
+If you need to compile the Java bindings, please follow these steps:
+
+- Download the OpenNebula source code (available in the `OpenNebula code repository `__)
+
+  ``git clone https://www.github.com/OpenNebula/one``
+
+- In the source code, change to the directory ``src/oca/java``
+
+  ``cd one/src/oca/java``
+
+- Download the Apache XML-RPC library (note that the URL may change in your case)
+
+  ``curl -OL https://archive.apache.org/dist/ws/xmlrpc/binaries/xmlrpc-3.1.1-bin.tar.gz``
+
+- Uncompress it and move the ``lib`` directory into the source code directory
+
+  ``tar xzvf xmlrpc-3.1.1-bin.tar.gz; mv xmlrpc-3.1.1/lib .``
+
+- Modify the build script so that the XML-RPC library is added to the library path
+
+  ``sed -i 's@^LIB_DIR="/usr/share/java"$@LIB_DIR="/usr/share/java:./lib"@g' build.sh``
+
+- Build the library
+
+  ``./build.sh``
+
+The ``.jar`` file with the bindings will be ``jar/org.opennebula.client.jar``.
+
 Usage
 ================================================================================
 
-To use the OpenNebula Cloud API for Java in your Java project, you have to add to the classpath the org.opennebula.client.jar file and the XML-RPC libraries located in the lib directory.
+To use the OpenNebula Cloud API for Java in your Java project, you have to add to the classpath the ``org.opennebula.client.jar`` file and the XML-RPC libraries located in the ``lib`` directory.
 
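+For example, assuming a client class in ``MyClient.java`` and the ``jar/`` and ``lib/`` layout produced by the build above (the file name is just an illustration), compiling and running it could look like:
+
+.. code-block:: bash
+
+   javac -cp "jar/org.opennebula.client.jar:lib/*" MyClient.java
+   java -cp ".:jar/org.opennebula.client.jar:lib/*" MyClient
+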
 Code Sample
 ================================================================================
diff --git a/source/intro_release_notes/release_notes/arm64.rst b/source/intro_release_notes/release_notes/arm64.rst
index bc877d099f..2aa23d3190 100644
--- a/source/intro_release_notes/release_notes/arm64.rst
+++ b/source/intro_release_notes/release_notes/arm64.rst
@@ -4,7 +4,7 @@
 ARM64 packages
 ================================================================================
 
-6.10.4-EE and 6.10.0.1-CE release includes ARM64 packages for all supported operating systems.
+The 6.10.5-EE and 6.10.0.1-CE releases include ARM64 packages for all supported operating systems.
 
 These ARM packages are being released in beta mode, allowing the community to test them and provide feedback. This initiative aims to extend OpenNebula’s flexibility by enabling deployment on ARM-based servers (both hypervisors and front-end nodes), opening new possibilities for lightweight, distributed cloud environments.
diff --git a/source/intro_release_notes/release_notes/known_issues.rst b/source/intro_release_notes/release_notes/known_issues.rst
index f8fd01e0dd..0c76366d10 100644
--- a/source/intro_release_notes/release_notes/known_issues.rst
+++ b/source/intro_release_notes/release_notes/known_issues.rst
@@ -28,13 +28,12 @@ Sunstone
 
 - Guacamole RDP as is currently shipped in OpenNebula does not support NLA authentication. You can follow `these instructions `__ in order to disable NLA in the Windows box to use Guacamole RDP within Sunstone.
 
-- The current configuration for both Sunstone and One-Provision contain a mismatch in the `keep_me_logged` configuration option. This issue has been fixed but the new configuration files need to be downloaded in order for this fix to take effect, refer to :ref:`the following section ` for instructions on how to do this.
+- `'Groupadmin' view fails to load for groups with multiple admin users `__.
 
-- The `Update VM Configuration` dialog contains a bug which will result in a blank screen if one tries to update the configuration of a virtual machine with less than 2 total disks attached. The quickest workaround for this is to attach another minimal disk to the VM.
+- The current configuration for both Sunstone and One-Provision contains a mismatch in the `keep_me_logged` configuration option. This issue has been fixed, but the new configuration files need to be downloaded for the fix to take effect; refer to :ref:`the following section ` for instructions on how to do this.
 
 - When deleting an element in the datatable. such as running the terminate in a VM, we recommend refreshing the window. as there is an error in the client code discussed in the following `issue `__.
 
-
 Install Linux Graphical Desktop on KVM Virtual Machines
 ================================================================================
diff --git a/source/intro_release_notes/release_notes/platform_notes.rst b/source/intro_release_notes/release_notes/platform_notes.rst
index 4448af3131..fd37cd0925 100644
--- a/source/intro_release_notes/release_notes/platform_notes.rst
+++ b/source/intro_release_notes/release_notes/platform_notes.rst
@@ -33,6 +33,8 @@ Front-End Components
 | Ruby Gems                | Versions installed by opennebula-rubygems   | Detailed information in ``/usr/share/one/Gemfile``    |
 +--------------------------+---------------------------------------------+-------------------------------------------------------+
 
+.. note:: For the nodes' operating systems, certified packages are provided for the two latest LTS releases.
+
 .. _vcenter_nodes_platform_notes:
 
 vCenter Nodes
@@ -43,7 +45,8 @@ vCenter Nodes
 +-----------+---------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
 | Component | Version                               | More information                                                                                                                       |
 +===========+=======================================+========================================================================================================================================+
-| vCenter   | 7.0.x, managing ESX 7.0.x             | :ref:`vCenter Node Installation `                                                                                                      |
+| vCenter   | 7.0.x managing ESX 7.0.x &            | :ref:`vCenter Node Installation `                                                                                                      |
+|           | 8.0.x managing ESX 8.0.x              |                                                                                                                                        |
 +-----------+---------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
 | NSX-T     | 2.4.1+                                | `VMware compatiblity `__. :ref:`NSX Documentation `. 
|
 +-----------+---------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/source/intro_release_notes/release_notes_enterprise/index.rst b/source/intro_release_notes/release_notes_enterprise/index.rst
index 5cc1c72b93..2d7cbf4927 100644
--- a/source/intro_release_notes/release_notes_enterprise/index.rst
+++ b/source/intro_release_notes/release_notes_enterprise/index.rst
@@ -11,4 +11,5 @@ Release Notes |version| Enterprise Edition
    Resolved Issues 6.10.1 
    Resolved Issues 6.10.2 
    Resolved Issues 6.10.3 
    Resolved Issues 6.10.4 
+   Resolved Issues 6.10.5 
diff --git a/source/intro_release_notes/release_notes_enterprise/resolved_issues_6105.rst b/source/intro_release_notes/release_notes_enterprise/resolved_issues_6105.rst
new file mode 100644
index 0000000000..e13a76a1cf
--- /dev/null
+++ b/source/intro_release_notes/release_notes_enterprise/resolved_issues_6105.rst
@@ -0,0 +1,61 @@
+.. _resolved_issues_6105:
+
+Resolved Issues in 6.10.5
+--------------------------------------------------------------------------------
+
+A complete list of solved issues for 6.10.5 can be found in the `project development portal `__.
+
+The following new features have been backported to 6.10.5:
+
+- `Change ETH* context parameters on live VMs `__.
+
+The following issues have been solved in 6.10.5:
+
+- `Fix an issue with fs_lvm_ssh not honoring BRIDGE_LIST in the image datastore `__.
+- `Fix validation issue during Group + Group Admin creation at the same time `__.
+- `Fix scheduler allocation for VMs with NUMA pinning enabled `__.
+- `Fix user_inputs order not considered when instantiating a template through the CLI `__.
+- `Fix the KVMRC Ruby parser regexp that prevented using more than one parameter `__.
+- `Fix Sunstone should prioritize user views `__.
+- `Fix Sunstone Update VM Configuration wizard doesn't scale correctly `__.
+- `Fix Sunstone VM search leads to blank page `__.
+- `Fix wrong QCOW2_STANDALONE option in NFS System Datastore `__.
+- `Fix Sunstone does not cast placement constraints into update/instantiate dialog `__.
+- `Fix to prevent adding the same SSH key more than once `__.
+- `Fix VIRTIO_QUEUES not applying to hot plugged virtio NICs `__.
+- `Fix translation text when creating VMs `__.
+- `Fix ownership issue when instantiating a VM as a different user `__.
+- `Fix Ethernet text on Address Ranges when creating VMs `__.
+- `Fix time ordering when adding a scheduled action while creating VMs `__.
+- `Fix fsck to update history ETIME using EETIME or RETIME `__.
+- `Fix VM placement expression parsing in Sunstone `__.
+- `Fix physical CPU tooltip in Sunstone `__.
+- `Fix errors when canceling a backup operation `__.
+- `Fix add edit/delete buttons for VM attributes `__.
+- `Fix rule addition for security group creation `__.
+- `Fix Security Group Role Description for IPs `__.
+- `Fix history ETIME after resize, disk-resize and pci-(de)attach for VMs in undeployed state `__.
+- `Fix cluster assignment when creating Virtual Network `__.
+- `Fix UI issue on VM Instantiation when Memory or CPU modification is set to 'range' within a VM template `__.
+- `Fix VM configuration for Single VM Backup configuration `__.
+- `Fix UI overlay issue on FireEdge VNC on 1280*760 resolution `__.
+- `Fix 2FA reset in FireEdge after UI changes are made `__.
+- `Fix LDAP authentication by removing password encoding `__. 
+- `Fix Change password button wrong displayed on FireEdge `__. + +Changes in Configuration Files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Since version 6.10.5 the following changes apply to OpenNebula services configuration files: + + +.. warning:: The following attributes are not included in the configuration files distributed with 6.10.5. If you wish to use these attributes, add them manually to the corresponding file. + +OpenNebula Service +^^^^^^^^^^^^^^^^^^ + ++----------------------+--------------------------------------------------------------+-------------------------------------------------------+-------------+ +| Config file | Description | Action | Values | ++======================+==============================================================+=======================================================+=============+ +| oned.conf | New attribute: CONTEXT_ALLOW_ETH_UPDATES | Allow manual updates of CONTEXT->ETH* values. | NO, YES | ++----------------------+--------------------------------------------------------------+-------------------------------------------------------+-------------+ diff --git a/source/legacy_components/ruby_sunstone/sunstone.rst b/source/legacy_components/ruby_sunstone/sunstone.rst index 05cb78e0e8..b73ebd87d3 100644 --- a/source/legacy_components/ruby_sunstone/sunstone.rst +++ b/source/legacy_components/ruby_sunstone/sunstone.rst @@ -20,6 +20,18 @@ The Sunstone configuration file can be found in ``/etc/one/sunstone-server.conf` After a configuration change, the Sunstone server must be :ref:`restarted ` to take effect. +.. tip:: + + If you update the ``tmpdir`` value make sure you add this new value to systemd's unit by running ``systemctl edit opennebula-sunstone.service``, + and adding in a new section at the empty top area: + + .. code:: + + [Service] + ReadWriteDirectories=/path/to/new/tmp + + where ``/path/to/new/tmp`` is the new value of ``tmpdir``. Then run ``systemctl daemon-reload``, and ``systemctl restart opennebula-sunstone`` + +------------------------------------+-----------------------------------------------------------------------------------------------------+ | Parameter | Description | +====================================+=====================================================================================================+ diff --git a/source/management_and_operations/references/cli.rst b/source/management_and_operations/references/cli.rst index 27f7a7a680..a0d8a27be1 100644 --- a/source/management_and_operations/references/cli.rst +++ b/source/management_and_operations/references/cli.rst @@ -9,34 +9,34 @@ OpenNebula provides a set commands to interact with the system: CLI ================================================================================ -* `oneacct `__: gets accounting data from OpenNebula. -* `oneacl `__: manages OpenNebula ACLs. -* `onecfg `__: manages OpenNebula configuration files upgrade. -* `onecluster `__: manages OpenNebula clusters. -* `onedatastore `__: manages OpenNebula datastores. -* `onedb `__: OpenNebula database migration tool. -* `onegroup `__: manages OpenNebula groups. -* `onehook `__: manages OpenNebula hooks. -* `onehost `__: manages OpenNebula hosts. -* `oneimage `__: manages OpenNebula images. -* `onemarket `__: manages internal and external marketplaces. -* `onemarketapp `__: manages appliances from marketplaces. -* `oneprovider `__: manages OpenNebula providers. -* `oneprovision `__: manages OpenNebula provisions. -* `onesecgroup `__: manages OpenNebula security groups. 
-* `oneshowback `__: OpenNebula Showback tool. -* `onetemplate `__: manages OpenNebula templates. -* `oneuser `__: manages OpenNebula users. -* `onevcenter `__: handles vCenter resource import. -* `onevdc `__: manages OpenNebula Virtual DataCenters. -* `onevm `__: manages OpenNebula virtual machines. -* `onevmgroup `__: manages OpenNebula VMGroups. -* `onevnet `__: manages OpenNebula networks. -* `onevntemplate `__: manages OpenNebula networks templates. -* `onevrouter `__: manages OpenNebula Virtual Routers. -* `onezone `__: manages OpenNebula zones. -* `oneirb `__: opens an irb session. -* `onelog `__: access to OpenNebula services log files. +* `oneacct `__: gets accounting data from OpenNebula. +* `oneacl `__: manages OpenNebula ACLs. +* `onecfg `__: manages OpenNebula configuration files upgrade. +* `onecluster `__: manages OpenNebula clusters. +* `onedatastore `__: manages OpenNebula datastores. +* `onedb `__: OpenNebula database migration tool. +* `onegroup `__: manages OpenNebula groups. +* `onehook `__: manages OpenNebula hooks. +* `onehost `__: manages OpenNebula hosts. +* `oneimage `__: manages OpenNebula images. +* `onemarket `__: manages internal and external marketplaces. +* `onemarketapp `__: manages appliances from marketplaces. +* `oneprovider `__: manages OpenNebula providers. +* `oneprovision `__: manages OpenNebula provisions. +* `onesecgroup `__: manages OpenNebula security groups. +* `oneshowback `__: OpenNebula Showback tool. +* `onetemplate `__: manages OpenNebula templates. +* `oneuser `__: manages OpenNebula users. +* `onevcenter `__: handles vCenter resource import. +* `onevdc `__: manages OpenNebula Virtual DataCenters. +* `onevm `__: manages OpenNebula virtual machines. +* `onevmgroup `__: manages OpenNebula VMGroups. +* `onevnet `__: manages OpenNebula networks. +* `onevntemplate `__: manages OpenNebula networks templates. +* `onevrouter `__: manages OpenNebula Virtual Routers. +* `onezone `__: manages OpenNebula zones. +* `oneirb `__: opens an irb session. +* `onelog `__: access to OpenNebula services log files. The output of these commands can be customized by modifying the configuration files that can be found in ``/etc/one/cli/``. They also can be customized on a per-user basis, in this case the configuration files should be placed in ``$HOME/.one/cli``. @@ -45,13 +45,13 @@ List operation for each command will open a ``less`` session for a better user e OneFlow Commands ================================================================================ -* `oneflow `__: OneFlow Service management. -* `oneflow-template `__: OneFlow Service Template management. +* `oneflow `__: OneFlow Service management. +* `oneflow-template `__: OneFlow Service Template management. OneGate Commands ================================================================================ -* `onegate `__: OneGate Service management. +* `onegate `__: OneGate Service management. .. 
_cli_shell:
diff --git a/source/management_and_operations/references/install_steps.txt b/source/management_and_operations/references/install_steps.txt
index 1f6cd2a5c4..eb9c7d13dc 100644
--- a/source/management_and_operations/references/install_steps.txt
+++ b/source/management_and_operations/references/install_steps.txt
@@ -72,7 +72,7 @@ Windows
 
 Download the MSI package into ``C:\``:
 
-* https://github.com/OpenNebula/one-apps/releases/download/v6.8.1/one-context-6.8.1.msi
+* https://github.com/OpenNebula/one-apps/releases/download/v|context_release|/one-context-|context_release|.msi
 
 Or execute this command in powershell:
diff --git a/source/open_cluster_deployment/kvm_node/kvm_driver.rst b/source/open_cluster_deployment/kvm_node/kvm_driver.rst
index aac66a95c2..5f77f52420 100644
--- a/source/open_cluster_deployment/kvm_node/kvm_driver.rst
+++ b/source/open_cluster_deployment/kvm_node/kvm_driver.rst
@@ -9,7 +9,7 @@ Requirements
 
 The Hosts will need a CPU with `Intel VT `__ or `AMD's AMD-V `__ features in order to support virtualization. KVM's `Preparing to use KVM `__ guide will clarify any doubts you may have regarding whether your hardware supports KVM.
 
-Also, since OpenNebula 6.10.4-EE and 6.10.0.1-CE ARM64 architecture is supported (in beta mode).
+Also, since OpenNebula 6.10.5-EE and 6.10.0.1-CE, the ARM64 architecture is supported (in beta mode).
 
 KVM will be installed and configured after following the :ref:`KVM Host Installation ` section.
diff --git a/source/open_cluster_deployment/networking_setup/openvswitch.rst b/source/open_cluster_deployment/networking_setup/openvswitch.rst
index 09eed3a112..f481130f21 100644
--- a/source/open_cluster_deployment/networking_setup/openvswitch.rst
+++ b/source/open_cluster_deployment/networking_setup/openvswitch.rst
@@ -188,7 +188,7 @@ For example, to configure default page size of **1G** and **250** hugepages with
 
    # update-grub
 
-.. tip:: Use ``intel_iommu=on`` instead for hosts with an AMD CPU
+.. tip:: Use ``amd_iommu=on`` instead for hosts with an AMD CPU
 
 Then reboot the system. After rebooting, make sure that the hugepages mount can be seen so the applications can access them.
diff --git a/source/open_cluster_deployment/networking_setup/vxlan.rst b/source/open_cluster_deployment/networking_setup/vxlan.rst
index 5ffffe6d16..0c17c1c680 100644
--- a/source/open_cluster_deployment/networking_setup/vxlan.rst
+++ b/source/open_cluster_deployment/networking_setup/vxlan.rst
@@ -13,7 +13,11 @@ Additionally, each VXLAN has an associated multicast address to encapsulate L2 b
 Considerations & Limitations
 ================================================================================
 
-This driver works with the default UDP server port 8472.
+By default, this driver uses the Linux kernel's default UDP port 8472 to transfer VXLAN traffic between Hosts.
+
+.. important::
+
+   Please note that the official IANA port for VXLAN transport is UDP 4789. Take this into consideration if you use hardware equipment.
 
 VXLAN traffic is forwarded to a physical device; this device can be set (optionally) to be a VLAN tagged interface, but in that case you must make sure that the tagged interface is manually created first in all the Hosts.
 
@@ -21,6 +25,18 @@ VXLAN traffic is forwarded to a physical device; this device can be set (optiona
 
 The network interface that will act as the physical device **must** have an IP.
 
+The bridge ``${PHYSDEV}.${VXLAN_ID}`` (PHYSDEV is the physical interface and VXLAN_ID is the VXLAN VNI) will be created, and the VM NICs will be attached to it. This has a very important implication: **the maximum bridge name length allowed by iproute2 is 15 characters**.
+
+.. important::
+
+   If the physical interface name plus the VNI is longer than 15 characters, deploying any VM on that Virtual Network will fail. One solution is to create an alternative name (alias) for the interface. For instance, if you have the interface ``en0s0f0p0``, you can execute
+
+   ``sudo ip link set en0s0f0p0 alias vx``
+
+   and use the physical device ``vx`` for the Virtual Network.
+
+   This change DOES NOT persist after a reboot (the command must be issued again, or you should use netplan or some other tool to make it persistent).
+
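+   One way to make it persistent, without netplan, is a plain systemd oneshot unit (a minimal sketch; the unit name, interface name, alias, and the ``ip`` binary path are assumptions to adapt to your setup):
+
+   .. code-block:: bash
+
+      cat > /etc/systemd/system/vxlan-alias.service <<'EOF'
+      [Unit]
+      Description=Set alias for the VXLAN physical device
+      Before=network.target
+
+      [Service]
+      Type=oneshot
+      # Same command as above; adjust the interface name and alias
+      ExecStart=/usr/sbin/ip link set en0s0f0p0 alias vx
+
+      [Install]
+      WantedBy=multi-user.target
+      EOF
+      systemctl daemon-reload
+      systemctl enable vxlan-alias.service
+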
 Limited Count of VXLANs on Host
 --------------------------------------------------------------------------------
diff --git a/source/open_cluster_deployment/storage_setup/local_ds.rst b/source/open_cluster_deployment/storage_setup/local_ds.rst
index f17bedd4b1..b16fe5bb52 100644
--- a/source/open_cluster_deployment/storage_setup/local_ds.rst
+++ b/source/open_cluster_deployment/storage_setup/local_ds.rst
@@ -21,7 +21,10 @@ Host Setup
 
 Just make sure that there is enough space under ``/var/lib/one/datastores`` to store the disks of running VMs on that Host.
 
-.. warning:: Make sure all the Hosts, including the Front-end, can SSH to any other host (including themselves), otherwise migrations will not work.
+.. warning:: Local datastores require that:
+
+   * The **Front-end hostnames are resolvable** from all Hosts.
+   * Every Host (including the Front-end) can **SSH to every other Host**, including themselves.
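+
+   A quick way to verify both requirements from each Host is a loop like the following (a minimal sketch; replace the example names ``frontend``, ``host01``, and ``host02`` with your own Front-end and Host names):
+
+   .. code-block:: bash
+
+      for h in frontend host01 host02; do
+          ssh -o BatchMode=yes oneadmin@$h true && echo "$h OK" || echo "$h FAILED"
+      done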
| -+-----------------+---------------------------------------------------------------------------------------------+ ++------------------+----------------------------------------------------------------------------------------------+ +| Attribute | Description | ++==================+==============================================================================================+ +| ``NAME`` | Name of Datastore | ++------------------+----------------------------------------------------------------------------------------------+ +| ``TYPE`` | ``IMAGE_DS`` | ++------------------+----------------------------------------------------------------------------------------------+ +| ``DS_MAD`` | ``fs`` | ++------------------+----------------------------------------------------------------------------------------------+ +| ``TM_MAD`` | ``fs_lvm`` for NFS mode | ++------------------+----------------------------------------------------------------------------------------------+ +| | ``fs_lvm_ssh`` for SSH mode | ++------------------+----------------------------------------------------------------------------------------------+ +| ``DISK_TYPE`` | ``BLOCK`` | ++------------------+----------------------------------------------------------------------------------------------+ +|| ``BRIDGE_LIST`` || List of Hosts with access to the file system where image files are stored before dumping to | +|| || LVs. | ++------------------+----------------------------------------------------------------------------------------------+ + The following examples illustrate the creation of an LVM datastore using a template. In this case we will use the Host ``host01`` as one of our OpenNebula LVM-enabled Hosts. diff --git a/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst b/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst index 4f85998786..17370040cf 100644 --- a/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst +++ b/source/quick_start/deployment_basics/try_opennebula_on_kvm.rst @@ -24,7 +24,7 @@ The cloud environment installed by miniONE is mainly intended for evaluation, de .. note:: To complete this tutorial, you will need to log in to a remote Linux machine via SSH. If you follow this tutorial on a Windows machine, you will need to use an SSH client application such as `PuTTY `__. - + .. tip:: For a list of options supported by the script, run ``bash minione -h``. The script supports several types of installations (such as installing a Front-end and a KVM hypervisor node) which are not covered in this tutorial. @@ -61,15 +61,15 @@ To run the miniONE script on AWS, you will need to instantiate a virtual machine - 2616 (for the FireEdge GUI) - 5030 (for the OneGate service) -.. tip:: To quickly deploy a suitable VM, browse the AWS AMI Catalog and select ``Ubuntu Server 22.04 LTS (HVM), SSD Volume Type``: +.. tip:: To quickly deploy a suitable VM, browse the AWS AMI Catalog and select **Ubuntu Server 24.04 LTS (HVM), SSD Volume Type**: - .. image:: /images/minione-aws-ubuntu22.04.png + .. image:: /images/minione-aws-ubuntu24.04.png :align: center Below is an example of a successfully-tested configuration (though by no means the only possible one): - Region: Frankfurt -- Operating System: Ubuntu Server 22.04 LTS (HVM) +- Operating System: Ubuntu Server 24.04 LTS (HVM) - Tier: ``t2.medium`` - Open ports: 22, 80, 2616, 5030 - Storage: 80 GB SSD @@ -111,7 +111,7 @@ For example: .. 
warning:: Ensure you have set the appropriate permissions for the PEM file, or for security reasons SSH will refuse to connect. - + Step 1.2. Update the VM Operating System ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -120,7 +120,7 @@ Once you have logged in to the VM as user ``ubuntu``, use the ``sudo`` command t .. prompt:: - sudo su - + sudo -i Then, update the system to its latest software packages by running the following command: @@ -128,6 +128,35 @@ Then, update the system to its latest software packages by running the following apt update && apt upgrade +After updating, you will probably need to restart the VM to run the latest kernel. Check the output of the ``apt upgrade`` command for lines similar to the following: + +.. prompt:: + + Pending kernel upgrade! + Running kernel version: + 6.8.0-1012-aws + Diagnostics: + The currently running kernel version is not the expected kernel version 6.8.0-1014-aws. + +In this example, you need to restart the VM in order to upgrade to kernel 6.8.0-1014-aws. To restart the VM, run: + +.. prompt:: + + shutdown -r now + +You will be immediately logged out of the VM as it restarts. Wait a few moments for the VM to finish rebooting, then log in again using the same procedure as before. After logging back into the VM, you can check the running kernel version with: + +.. prompt:: + + uname -a + +For example, in this case: + +.. prompt:: + + $ uname -a + Linux ip-172-31-3-252 6.8.0-1014-aws #15-Ubuntu SMP Thu Aug 8 19:13:06 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux + Your AWS VM is now ready. In the next steps, we’ll download the miniONE script, upload it to the VM, and run the installation. Step 3: Download and install miniONE @@ -141,20 +170,20 @@ Step 3.1. Copy the miniONE script to the AWS VM After downloading miniONE, you will need to copy it to your AWS VM. - On Linux and Mac: - + If you’re on Linux, you can copy it with the ``scp`` command, providing the same user and PEM file as when logging in via SSH. For example, the command below copies the miniONE script to the ``ubuntu`` user’s home directory: .. prompt:: - + scp -i ubuntu@:~ - On Windows: You can use either of two methods: - + * The GUI tool `WinSCP `__, which allows you to copy files by drag-and-drop * The command-line tool `PuTTY Secure Copy `__, which emulates the Unix ``scp`` tool. - + For both methods you will need to provide the private key file for authentication. Step 3.2. Run the miniONE script on the AWS VM @@ -162,11 +191,25 @@ Step 3.2. Run the miniONE script on the AWS VM After copying the miniONE script to the VM, log in to the VM (as described :ref:`above `). -Use the ``sudo`` command to become the ``root`` user. +Use the ``sudo`` command to become the ``root`` user: + +.. prompt:: + + sudo -i -If necessary, use the ``cd`` command to navigate to the folder where you copied the miniONE script. For example, if you copied it to the home directory of user ``ubuntu`` run ``cd ~ubuntu``. +If necessary, use the ``cd`` command to navigate to the folder where you copied the miniONE script. For example, if you copied it to the home directory of user ``ubuntu`` run: -To install miniONE, run: +.. prompt:: + + cd ~ubuntu + +Next, ensure that the ``minione`` file has execute permissions, by running: + +.. prompt:: + + chmod +x minione + +To install miniONE, run as root: .. prompt:: @@ -177,7 +220,7 @@ The miniONE script will begin the installation, logging output to the terminal. .. 
prompt:: ### Report - OpenNebula 6.8 was installed + OpenNebula 6.10 was installed Sunstone is running on: http:/// FireEdge is running on: @@ -185,7 +228,7 @@ The miniONE script will begin the installation, logging output to the terminal. Use following to login: user: oneadmin password: lCmPUb5Gwk - + At this point, you have successfully installed miniONE. OpenNebula services should be running, and the system should be ready for your first login. .. important:: @@ -224,7 +267,7 @@ This is the default view for cloud administrators. From this view in Sunstone, y | -Congratulations — you have deployed an OpenNebula Front-end node, which is ready to provision resources on cloud infrastructure. +Congratulations — you have deployed an OpenNebula Front-end node, which is ready to provision resources on cloud infrastructure. Next Steps diff --git a/source/quick_start/operation_basics/provisioning_edge_cluster.rst b/source/quick_start/operation_basics/provisioning_edge_cluster.rst index 1a4efde516..e7e1f4a68d 100644 --- a/source/quick_start/operation_basics/provisioning_edge_cluster.rst +++ b/source/quick_start/operation_basics/provisioning_edge_cluster.rst @@ -52,7 +52,7 @@ Step 1: Configure AWS As a first step, if you don’t already have one, create an account in AWS. AWS publishes a complete guide: `How do I create and activate a new AWS account? `__ -After you have created your account, you’ll need to obtain the ``access_key`` and ``secret_key`` of a user with the necessary permissions to manage instances. The relevant AWS guide is `Configure tool authentication with AWS `__. +After you have created your account, you’ll need to obtain the ``access_key`` and ``secret_key`` of a user with the necessary permissions to manage instances. The relevant AWS guide is `Configure tool authentication with AWS `__. Next, you need to choose the region where you want to deploy the new resources. You can check the available regions in AWS’s documentation: `Regions, Availability Zones, and Local Zones `__. @@ -71,7 +71,7 @@ To log in, point your browser to the OneProvision address: https://:2616/fireedge/provision -In the log in screen, enter the credentials for user ``oneadmin``. +In the log-in screen, enter the credentials for user ``oneadmin``. Sunstone will display the **OneProvision** screen: @@ -105,7 +105,7 @@ Sunstone displays the **Provider template** screen, showing the **Provision type |image_provider_create_step1| -Click **Next**. In the next screen you can enter a description for your provider: +Click **Next**. In the next screen, you can enter a description for your provider: |image_provider_create_step2| @@ -155,7 +155,7 @@ OneProvision now displays the **Provider** screen showing the available provider | -In the next screen you can enter a description for your cluster, if desired: +In the next screen, you can enter a description for your cluster, if desired: .. image:: /images/fireedge_cpi_provision_create3.png :align: center @@ -212,7 +212,7 @@ To see a running log of the provision, click **Log**: Provisioning will take a few minutes. When it’s finished, the log will display the message ``Provision successfully created``, followed by the provision’s ID. -At this point the Edge Cluster has been created, and is up and running. In the next step, we’ll verify that all of the specified resources for the provision (the host, datastore, network, and the cluster itself) have been correctly created and registered with OpenNebula. +At this point, the Edge Cluster has been created and is up and running. 
In the next step, we’ll verify that all of the specified resources for the provision (the host, datastore, network, and the cluster itself) have been correctly created and registered with OpenNebula. Step 4: Validate the New Infrastructure diff --git a/source/quick_start/usage_basics/running_kubernetes_clusters.rst b/source/quick_start/usage_basics/running_kubernetes_clusters.rst index 31758eac4f..4ef7c7921a 100644 --- a/source/quick_start/usage_basics/running_kubernetes_clusters.rst +++ b/source/quick_start/usage_basics/running_kubernetes_clusters.rst @@ -39,7 +39,11 @@ Follow these steps: :align: center :scale: 50% - #. Select the **system** datastore for the AWS cluster. (If you began this Quick Start Guide on a clean install, it will probably display ID ``101``.) + #. Select the **system** datastore for the AWS cluster. (If you began this Quick Start Guide on a clean install, it will probably display ID ``100``.) + + .. image:: /images/sunstone-aws_edge_cluster_sys_ds.png + :align: center + #. Sunstone will display the **Info** panel for the datastore. Scroll down to the **Attributes** section and find the ``REPLICA_HOST`` attribute. Hover your mouse to the right, to display the **Copy**/**Edit**/**Delete** icons |icon3| for the attribute value: .. image:: /images/sunstone-aws_cluster_replica_host.png @@ -49,6 +53,7 @@ Follow these steps: | #. Click the **Delete** icon |icon4|. + #. When Sunstone requests to confirm the action, click **Yes**. You have deleted the ``REPLICA_HOST`` parameter from the datastore. In the next step we’ll download the OneKE appliance. @@ -61,8 +66,6 @@ The `OpenNebula Public Marketplace `__ is a r The Kubernetes cluster is packaged in a multi-VM service appliance listed as **Service OneKE **. To download it, follow the same steps as when downloading the WordPress VM: -Log in to Sunstone as user ``oneadmin``. - Open the left-hand pane, then select **Storage** -> **Apps**. Sunstone will display the **Apps** screen, showing the first page of apps that are available for download. .. image:: /images/sunstone-apps_list.png @@ -81,7 +84,13 @@ In the search field at the top, type ``oneke`` to filter by name. Then, select * Click the **Import into Datastore** |icon1| icon. -As with the WordPress appliance, Sunstone displays the **Download App to OpenNebula** wizard. In the first screen of the wizard, click **Next**. In the second screen you will need to select a datastore for the appliance. Select the **aws-edge-cluster-image** datastore. +As with the WordPress appliance, Sunstone displays the **Download App to OpenNebula** wizard. In the first screen of the wizard, click **Next**. + +.. image:: /images/sunstone-aws_cluster_download_oneke.png + :align: center + :scale: 60% + +In the second screen you will need to select a datastore for the appliance. Select the **aws-edge-cluster-image** datastore. |kubernetes-qs-marketplace-datastore| @@ -118,6 +127,8 @@ Sunstone displays the **Address Range** dialog box. Here you can define an addre |kubernetes-aws-private-network-range| +Click **Accept**. + Lastly, you will need to add a DNS server for the network. Select the **Context** tab, then the **DNS** input field. Type the address for the DNS server, such as ``8.8.8.8`` or ``1.1.1.1``. 
|kubernetes-aws-dns|
@@ -187,12 +198,36 @@ To expose an example application on the public network, you will need to enable
 
 |kubernetes-qs-enable-ingress|
 
+Enable Additional Network Options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Click **3** at the bottom of the page to go to the third **User Inputs** screen.
+
+In this screen, activate the following toggle switches:
+
+   * Enable DNS recursor
+   * Enable NAT
+   * Enable Router
+
+   .. image:: /images/sunstone-k8s_enable_netw_params.png
+      :align: center
+
+|
+
+Click **Next** to go to the next screen, **Network**.
+
 Select the Public and Private Networks
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The Kubernetes cluster needs access to the private and the public network defined for the Edge Cluster. First we’ll select the public network. Check that the **Network ID** drop-down menu displays ``Public``, then select the **metal-aws-edge-cluster-public** network.
+The Kubernetes cluster needs access to the private and the public network defined for the Edge Cluster. First we’ll select the public network.
+
+Set the **Network ID** drop-down menu to ``Public``, and the **Network Type** drop-down menu to ``Existing``.
+
+.. image:: /images/sunstone_kubernetes_netw_dropdowns.png
+
+Once the **Network ID** drop-down menu displays ``Public``, select the **metal-aws-edge-cluster-public** network.
 
 |kubernetes-qs-pick-networks-public|
 
@@ -226,12 +261,11 @@ To verify that the VMs for the cluster were correctly deployed, you can use the
 
 .. prompt:: bash $ auto
 
    [oneadmin@FN]$ onevm list
-       ID USER     GROUP    NAME                        STAT  CPU     MEM HOST            TIME
-        5 oneadmin oneadmin storage_0_(service_3)       runn    2      3G                 0d 00h05
-        4 oneadmin oneadmin worker_0_(service_3)        runn    2      3G                 0d 00h05
-        3 oneadmin oneadmin master_0_(service_3)        runn    2      3G                 0d 00h05
-        2 oneadmin oneadmin vnf_0_(service_3)           runn    1      2G                 0d 00h06
-        1 oneadmin oneadmin Service WordPress - KVM-1   runn    1      2G 54.235.30.169   0d 00h21
+       ID USER     GROUP    NAME                        STAT  CPU     MEM HOST            TIME
+        3 oneadmin oneadmin worker_0_(service_3)        runn    2      3G                 0d 00h31
+        2 oneadmin oneadmin master_0_(service_3)        runn    2      3G                 0d 00h31
+        1 oneadmin oneadmin vnf_0_(service_3)           runn    1    512M                 0d 00h31
+        0 oneadmin oneadmin Service WordPress - KVM-0   runn    1    768M                 0d 01h22
 
 At this point you have successfully instantiated the Kubernetes cluster. Before deploying an application, you need to find out the **public** IP address of the VNF node, since we will use it later to connect to the master Kubernetes node.
 
@@ -240,15 +274,18 @@ At this point you have successfully instantiated the Kubernetes cluster. Before
 Check the IP Address for the VNF Node
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To see the IP in Sunstone, go to **Instances** -> **VMs**, then check the **IP** column for the VNF VM.
+To check the VNF node IP in Sunstone, in the left-hand pane go to **Instances** -> **VMs**, then check the information displayed under **vnf_0_(service_)**. The IP is displayed on the right, highlighted in the image below (note that all public IPs have been blurred in the image):
+
+   .. image:: /images/sunstone-aws_k8s_vms_list.png
+      :align: center
 
 Alternatively, to check on the command line, log in to the Front-end and run:
 
 .. prompt:: bash $ auto
 
-   [oneadmin@FN]$ onevm show -j |jq -r .VM.TEMPLATE.NIC[0].EXTERNAL_IP
+   onevm show -j | jq -r .VM.TEMPLATE.NIC[0].EXTERNAL_IP
 
-Replace ```` with the ID of the VNF VM as listed by the ``onevm list`` command (ID ``2`` in the example above). 
+Replace ```` with the ID of the VNF VM as listed by the ``onevm list`` command (ID ``1`` in the example above). If you do not see all VMs listed, or if the OneKE Service is stuck in ``DEPLOYING``, see :ref:`Known Issues ` below. @@ -277,17 +314,17 @@ To deploy an application, we will first connect to the master Kubernetes node vi For connecting to the master Kubernetes node, you need to know the public address (AWS elastic IP) of the VNF node, as described :ref:`above `. -Once you know the correct IP, from the Front-end node connect to the master Kubernetes node with this command: +Once you know the correct IP, from the Front-end node connect to the master Kubernetes node with the below command (replace “1.2.3.4” with the public IP address of the VNF node): .. prompt:: bash $ auto - $ ssh -A -J root@ root@172.20.0.2 + $ ssh -A -J root@1.2.3.4 root@172.20.0.2 In this example, ``172.20.0.2`` is the private IP address of the Kubernetes master node (the second address in the private network). .. tip:: - If you don't use ``ssh-agent`` then you may skip the ``-A`` flag in the above command. You will need to copy your *private* ssh key (used to connect to VNF) into the VNF node itself, at the location ``~/.ssh/id_rsa``. Make sure that the file permissions are correct, i.e. ``0600`` (or ``u=rw,go=``). For example: + If you don’t use ``ssh-agent`` then you may skip the ``-A`` flag in the above command. You will need to copy your *private* ssh key (used to connect to VNF) into the VNF node itself, at the location ``~/.ssh/id_rsa``. Make sure that the file permissions are correct, i.e. ``0600`` (or ``u=rw,go=``). For example: .. prompt:: bash $ auto @@ -342,7 +379,8 @@ On the Kubernetes master node, create a file called ``expose-nginx.yaml`` with t port: 80 targetPort: 80 --- - apiVersion: traefik.containo.us/v1alpha1 + # In Traefik < 3.0.0 it used to be "apiVersion: traefik.containo.us/v1alpha1". + apiVersion: traefik.io/v1alpha1 kind: IngressRoute metadata: name: nginx @@ -386,7 +424,7 @@ OneFlow Service is Stuck in ``DEPLOYING`` An error in network configuration, or any major failure (such as network timeouts or performance problems) can cause the OneKE service to lock up due to a communications outage between it and the Front-end node. The OneKE service will lock if *any* of the VMs belonging to it does not report ``READY=YES`` to OneGate within the default time. -If one or more of the VMs in the Kubernetes cluster never leave the ``DEPLOYING`` state, you can troubleshoot OneFlow communications by inspecting the file ``/var/log/oneflow.log`` on the Front-end node. Look for a line like the following: +If one or more of the VMs in the Kubernetes cluster never leave the ``DEPLOYING`` state, you can troubleshoot OneFlow communications by inspecting the file ``/var/log/one/oneflow.log`` on the Front-end node. Look for a line like the following: .. code-block:: text @@ -402,7 +440,7 @@ To recreate the VM instance, you must first terminate the OneKE service. A servi .. prompt:: bash $ auto - [oneadmin@FN]$ oneflow recover --delete + oneflow recover --delete Then, re-instantiate the service from the Sunstone UI: in the left-hand pane, **Service Templates** -> **OneKE 1.29**, then click the **Instantiate** icon. @@ -411,7 +449,7 @@ Lack of Connectivity to the OneGate Server Another possible cause for VMs in the Kubernetes cluster failing to run is lack of contact between the VNF node in the cluster and the OneGate server on the Front-end. 
-As described in :ref:`Quick Start Using miniONE on AWS `, the AWS instance where the Front-end is running needs to allow incoming connections for port 5030. If you do not want to open the port for all addresses, check the **public** IP address of the VNF node (the AWS Elastic IP, see :ref:`above `), and create an inbound rule in the AWS security groups that IP.
+As described in :ref:`Quick Start Using miniONE on AWS `, the AWS instance where the Front-end is running must allow incoming connections for port 5030. If you do not want to open the port for all addresses, check the **public** IP address of the VNF node (the AWS Elastic IP, see :ref:`above `), and create an inbound rule in the AWS security groups for that IP.
 
 In cases of lack of connectivity with the OneGate server, the ``/var/log/one/oneflow.log`` file on the Front-end will display messages like the following:
 
@@ -422,43 +460,132 @@ In cases of lack of connectivity with the OneGate server, the ``/var/log/one/one
 
 In this scenario only the VNF node is successfully deployed, but no Kubernetes nodes.
 
-To troubleshoot, log in to the VNF node via SSH. Then, check if the VNF node is able to contact the OneGate server on the Front-end node, by running this command as root:
+To troubleshoot, follow these steps:
 
-.. prompt:: bash $ auto
+   #. Find out the IP address of the VNF node, as described :ref:`above `.
+   #. Log in to the VNF node via SSH as ``root``.
+   #. Check if the VNF node is able to contact the OneGate server on the Front-end node, by running this command:
 
-   [root@VNF]$ onegate vm show
+      .. prompt:: bash $ auto
 
-A successful response should look like:
+         onegate vm show
 
-.. code-block:: text
+      A successful response should look like:
 
-   [root@VNF]$ onegate vm show
-   VM 0
-   NAME : vnf_0_(service_3)
+      .. code-block:: text
 
-And a failure gives a timeout message:
+         [root@VNF]$ onegate vm show
+         VM 0
+         NAME : vnf_0_(service_3)
 
-.. code-block:: text
+      And a failure gives a timeout message:
 
-   [root@VNF]$ onegate vm show
-   Timeout while connected to server (Failed to open TCP connection to :5030 (execution expired)).
-   Server: :5030
+      .. code-block:: text
 
-Possible causes
-++++++++++++++++
+         [root@VNF]$ onegate vm show
+         Timeout while connected to server (Failed to open TCP connection to :5030 (execution expired)).
+         Server: :5030
+
+      In this case, the VNF node cannot communicate with the OneGate service on the Front-end node. Possible causes include:
 
-**Wrong Front-end node AWS IP**: The VNF node may be trying to connect to the OneGate server on the wrong IP address. In the VNF node, the IP address for the Front-end node is defined by the value of ``ONEGATE_ENDPOINT``, in the scripts found in the ``/run/one-context*`` directories. You can check the value with:
+      * **Wrong AWS IP for the Front-end node**: The VNF node may be trying to connect to the OneGate server on the wrong IP address. In the VNF node, the IP address for the Front-end node is defined by the value of ``ONEGATE_ENDPOINT``, in the scripts found in the ``/run/one-context*`` directories. You can check the value with:
 
-.. code-block:: text
+        .. code-block:: text
 
-   [root@VNF]$ grep ONEGATE -r /run/one-context*
+           grep -r ONEGATE /run/one-context*
 
-If the value of ``ONEGATE_ENDPOINT`` does not match the IP address where OneGate is listening on the Front-end node, edit the parameter with the correct IP address, then terminate the service from the Front-end (see :ref:`above `) and re-deploy. 
+        If the value of ``ONEGATE_ENDPOINT`` does not match the IP address where OneGate is listening on the Front-end node, edit the parameter with the correct IP address. Then, terminate the OneKE service from the Front-end (see :ref:`above `) and re-deploy.
 
-**Filtered incoming connections**: On the Front-end node, the OneGate server listens on port 5030, so you must ensure that this port accepts incoming connections. If necessary, create an inbound rule in the AWS security groups for the elastic IP of the VNF node.
+      * **Filtered incoming connections**: On the Front-end node, the OneGate server listens on port 5030, so you must ensure that this port accepts incoming connections. If necessary, create an inbound rule in the AWS security groups for the elastic IP of the VNF node.
 
 .. |icon1| image:: /images/icons/sunstone/import_into_datastore.png
 .. |icon2| image:: /images/icons/sunstone/instantiate.png
 .. |icon3| image:: /images/icons/sunstone/parameter_manipulation_icons.png
 .. |icon4| image:: /images/icons/sunstone/trash.png
 .. |icon5| image:: /images/icons/sunstone/VNC.png
+
+One or more VMs Fail to Report Ready
+++++++++++++++++++++++++++++++++++++++
+
+Another possible cause for the failure of the OneKE Service to leave the ``DEPLOYING`` state is that a temporary network glitch or other variation in performance prevented one or more of the VMs in the service from reporting ``READY`` to the OneGate service. In this case, it is possible that you see all of the VMs in the service up and running, but the OneKE service is stuck in ``DEPLOYING``.
+
+For example, on the Front-end the output of ``onevm list`` shows all VMs running:
+
+.. prompt::
+
+   onevm list
+     ID USER     GROUP    NAME                        STAT  CPU     MEM HOST            TIME
+      3 oneadmin oneadmin worker_0_(service_3)        runn    2      3G                 0d 01h02
+      2 oneadmin oneadmin master_0_(service_3)        runn    2      3G                 0d 01h02
+      1 oneadmin oneadmin vnf_0_(service_3)           runn    1    512M                 0d 01h03
+      0 oneadmin oneadmin Service WordPress - KVM-0   runn    1    768M                 0d 01h53
+
+Yet ``oneflow list`` shows:
+
+.. prompt::
+
+     ID USER     GROUP    NAME       STARTTIME      STAT
+      3 oneadmin oneadmin OneKE 1.29 08/30 12:30:07 DEPLOYING
+
+In this case you can manually instruct the VMs to report ``READY`` to the OneGate server. Follow these steps:
+
+   #. From the Front-end node, log in to the VNF node by running:
+
+      .. prompt::
+
+         ssh root@
+
+      (To find out the IP address of the VNF node, see :ref:`above `.)
+
+   #. For each VM in the OneKE service, run the following command:
+
+      .. prompt::
+
+         onegate vm update  --data "READY=YES"
+
+      For example, ``onegate vm update 2 --data "READY=YES"``.
+
+      Then, you can check the status of the service with ``onegate service show``:
+
+      .. prompt::
+
+         onegate service show
+         SERVICE 3
+         NAME  : OneKE 1.29
+         STATE : RUNNING
+
+         ROLE vnf
+         VM 1
+         NAME  : vnf_0_(service_3)
+
+         ROLE master
+         VM 2
+         NAME  : master_0_(service_3)
+
+         ROLE worker
+         VM 3
+         NAME  : worker_0_(service_3)
+
+         ROLE storage
+
+   #. On the Front-end, run ``oneflow list`` again to verify that the service reports ``RUNNING``:
+
+      .. prompt::
+
+         [oneadmin@FN]$ oneflow list
+           ID USER     GROUP    NAME       STARTTIME      STAT
+            3 oneadmin oneadmin OneKE 1.29 08/30 12:35:21 RUNNING
+
+
+One or more VMs are Ready but Unreachable
++++++++++++++++++++++++++++++++++++++++++
+
+A similar situation to the above can occur when ``onevm list`` shows all VMs running and the service is still stuck in the ``DEPLOYING`` state, but the problematic VM is not reachable through SSH (e.g. to run the ``onegate vm update`` command). 
+
+In this case, we can try to scale the role of the problematic VM down and then up again from the Front-end UI:
+
+   #. Go to **Services** in the Front-end UI and select the OneKE Service.
+   #. In the **Roles** tab, choose the problematic VM's role (e.g. ``worker``).
+   #. Scale the role to 0.
+   #. Wait until the VM shuts down and the scaling and cooldown periods of the service finish.
+   #. Scale the role back to 1.
+   #. Verify that the problem is solved and that ``oneflow list`` reports the ``RUNNING`` state.
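+
+The same scale-down/scale-up cycle can also be performed from the command line on the Front-end (a minimal sketch; ``3`` and ``worker`` are the example service ID and role used above, so substitute your own):
+
+.. prompt::
+
+   oneflow scale 3 worker 0
+   # wait for the role to reach cardinality 0 and the cooldown to finish
+   oneflow scale 3 worker 1
+   oneflow list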