diff --git a/README.rst b/README.rst index 686a01bb98..ab53a625df 100644 --- a/README.rst +++ b/README.rst @@ -90,7 +90,7 @@ On your computer, follow these steps to setup a local repository for working on .. code:: bash $ git clone https://github.com/YOUR_ACCOUNT/cloudstack-documentation.git - $ cd cloudstack-docs-install + $ cd cloudstack-documentation $ git remote add upstream https://github.com/apache/cloudstack-documentation.git $ git checkout main $ git fetch upstream diff --git a/requirements.txt b/requirements.txt index 97918fbdb8..1dbf927199 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,4 @@ docutils==0.20.1 Sphinx==7.2.6 sphinx-rtd-theme==2.0.0 readthedocs-sphinx-ext==2.2.5 -Jinja2==3.1.3 +Jinja2==3.1.5 diff --git a/source/_global.rst b/source/_global.rst index 0e5c15a353..2ba9c14a51 100644 --- a/source/_global.rst +++ b/source/_global.rst @@ -25,19 +25,19 @@ .. Latest version systemvm template name -.. |sysvm64-version| replace:: 4.19.1 -.. |sysvm64-name-xen| replace:: systemvm-xenserver-4.19.1 -.. |sysvm64-name-kvm| replace:: systemvm-kvm-4.19.1 -.. |sysvm64-name-vmware| replace:: systemvm-vmware-4.19.1 -.. |sysvm64-name-hyperv| replace:: systemvm-hyperv-4.19.1 -.. |sysvm64-name-ovm| replace:: systemvm-ovm-4.19.1 +.. |sysvm64-version| replace:: 4.20.0 +.. |sysvm64-name-xen| replace:: systemvm-xenserver-4.20.0-x86_64 +.. |sysvm64-name-kvm| replace:: systemvm-kvm-4.20.0-x86_64 +.. |sysvm64-name-vmware| replace:: systemvm-vmware-4.20.0-x86_64 +.. |sysvm64-name-hyperv| replace:: systemvm-hyperv-4.20.0-x86_64 +.. |sysvm64-name-ovm| replace:: systemvm-ovm-4.20.0-x86_64 .. Latest version systemvm template URL -.. |sysvm64-url-xen| replace:: http://download.cloudstack.org/systemvm/4.19/systemvmtemplate-4.19.1-xen.vhd.bz2 -.. |sysvm64-url-kvm| replace:: http://download.cloudstack.org/systemvm/4.19/systemvmtemplate-4.19.1-kvm.qcow2.bz2 -.. 
|sysvm64-url-vmware| replace:: http://download.cloudstack.org/systemvm/4.19/systemvmtemplate-4.19.1-vmware.ova -.. |sysvm64-url-hyperv| replace:: http://download.cloudstack.org/systemvm/4.19/systemvmtemplate-4.19.1-hyperv.vhd.zip -.. |sysvm64-url-ovm| replace:: http://download.cloudstack.org/systemvm/4.19/systemvmtemplate-4.19.1-ovm.raw.bz2 +.. |sysvm64-url-xen| replace:: http://download.cloudstack.org/systemvm/4.20/systemvmtemplate-4.20.0.0-x86_64-xen.vhd.bz2 +.. |sysvm64-url-kvm| replace:: http://download.cloudstack.org/systemvm/4.20/systemvmtemplate-4.20.0.0-x86_64-kvm.qcow2.bz2 +.. |sysvm64-url-vmware| replace:: http://download.cloudstack.org/systemvm/4.20/systemvmtemplate-4.20.0.0-x86_64-vmware.ova +.. |sysvm64-url-hyperv| replace:: http://download.cloudstack.org/systemvm/4.20/systemvmtemplate-4.20.0.0-x86_64-hyperv.vhd.zip +.. |sysvm64-url-ovm| replace:: http://download.cloudstack.org/systemvm/4.20/systemvmtemplate-4.20.0.0-x86_64-ovm.raw.bz2 .. Images diff --git a/source/_static/images/B&R-Backup-Respository.png b/source/_static/images/B&R-Backup-Respository.png new file mode 100644 index 0000000000..bd53395842 Binary files /dev/null and b/source/_static/images/B&R-Backup-Respository.png differ diff --git a/source/_static/images/account-limits.png b/source/_static/images/account-limits.png new file mode 100644 index 0000000000..63658b1859 Binary files /dev/null and b/source/_static/images/account-limits.png differ diff --git a/source/_static/images/add-remove-sharedfs-network.png b/source/_static/images/add-remove-sharedfs-network.png new file mode 100644 index 0000000000..8ca212de58 Binary files /dev/null and b/source/_static/images/add-remove-sharedfs-network.png differ diff --git a/source/_static/images/cks-acquire-publicip.png b/source/_static/images/cks-acquire-publicip.png new file mode 100644 index 0000000000..71831b7b70 Binary files /dev/null and b/source/_static/images/cks-acquire-publicip.png differ diff --git 
a/source/_static/images/cks-addfirewall.png b/source/_static/images/cks-addfirewall.png new file mode 100644 index 0000000000..585e5c2e46 Binary files /dev/null and b/source/_static/images/cks-addfirewall.png differ diff --git a/source/_static/images/cks-addloadbalancer.png b/source/_static/images/cks-addloadbalancer.png new file mode 100644 index 0000000000..ed2f7b4057 Binary files /dev/null and b/source/_static/images/cks-addloadbalancer.png differ diff --git a/source/_static/images/cks-addnode.png b/source/_static/images/cks-addnode.png new file mode 100644 index 0000000000..cabeced27c Binary files /dev/null and b/source/_static/images/cks-addnode.png differ diff --git a/source/_static/images/create-sharedfs-admin.png b/source/_static/images/create-sharedfs-admin.png new file mode 100644 index 0000000000..9eec6c5086 Binary files /dev/null and b/source/_static/images/create-sharedfs-admin.png differ diff --git a/source/_static/images/create-sharedfs.png b/source/_static/images/create-sharedfs.png new file mode 100644 index 0000000000..0a34d2aee8 Binary files /dev/null and b/source/_static/images/create-sharedfs.png differ diff --git a/source/_static/images/create-webhook.png b/source/_static/images/create-webhook.png new file mode 100644 index 0000000000..f1f6286f76 Binary files /dev/null and b/source/_static/images/create-webhook.png differ diff --git a/source/_static/images/default-login.png b/source/_static/images/default-login.png new file mode 100644 index 0000000000..882844c6d2 Binary files /dev/null and b/source/_static/images/default-login.png differ diff --git a/source/_static/images/deploy-vm-arch-types.png b/source/_static/images/deploy-vm-arch-types.png new file mode 100644 index 0000000000..ab16eb5d95 Binary files /dev/null and b/source/_static/images/deploy-vm-arch-types.png differ diff --git a/source/_static/images/dynamic-routing-as-number-ranges.png b/source/_static/images/dynamic-routing-as-number-ranges.png new file mode 100644 index 
0000000000..882556b796 Binary files /dev/null and b/source/_static/images/dynamic-routing-as-number-ranges.png differ diff --git a/source/_static/images/dynamic-routing-as-numbers.png b/source/_static/images/dynamic-routing-as-numbers.png new file mode 100644 index 0000000000..fb67649020 Binary files /dev/null and b/source/_static/images/dynamic-routing-as-numbers.png differ diff --git a/source/_static/images/dynamic-routing-bgp-peers.png b/source/_static/images/dynamic-routing-bgp-peers.png new file mode 100644 index 0000000000..ad238bd2b5 Binary files /dev/null and b/source/_static/images/dynamic-routing-bgp-peers.png differ diff --git a/source/_static/images/dynamic-routing-change-network-bgp-peers.png b/source/_static/images/dynamic-routing-change-network-bgp-peers.png new file mode 100644 index 0000000000..2184c00bd6 Binary files /dev/null and b/source/_static/images/dynamic-routing-change-network-bgp-peers.png differ diff --git a/source/_static/images/dynamic-routing-change-vpc-bgp-peers.png b/source/_static/images/dynamic-routing-change-vpc-bgp-peers.png new file mode 100644 index 0000000000..9d3f8ce74a Binary files /dev/null and b/source/_static/images/dynamic-routing-change-vpc-bgp-peers.png differ diff --git a/source/_static/images/edit-user-api-key-access.png b/source/_static/images/edit-user-api-key-access.png new file mode 100644 index 0000000000..e36d6400d7 Binary files /dev/null and b/source/_static/images/edit-user-api-key-access.png differ diff --git a/source/_static/images/filter-user-api-key-access.png b/source/_static/images/filter-user-api-key-access.png new file mode 100644 index 0000000000..d474527a47 Binary files /dev/null and b/source/_static/images/filter-user-api-key-access.png differ diff --git a/source/_static/images/forgot-password.png b/source/_static/images/forgot-password.png new file mode 100644 index 0000000000..4042434809 Binary files /dev/null and b/source/_static/images/forgot-password.png differ diff --git 
a/source/_static/images/manage-ipv4-subnets-for-networks.png b/source/_static/images/manage-ipv4-subnets-for-networks.png new file mode 100644 index 0000000000..9d29f99251 Binary files /dev/null and b/source/_static/images/manage-ipv4-subnets-for-networks.png differ diff --git a/source/_static/images/manage-ipv4-subnets-for-zone.png b/source/_static/images/manage-ipv4-subnets-for-zone.png new file mode 100644 index 0000000000..d5959a02b8 Binary files /dev/null and b/source/_static/images/manage-ipv4-subnets-for-zone.png differ diff --git a/source/_static/images/management-server-peers.png b/source/_static/images/management-server-peers.png new file mode 100644 index 0000000000..898cb36352 Binary files /dev/null and b/source/_static/images/management-server-peers.png differ diff --git a/source/_static/images/management-server-statistics.png b/source/_static/images/management-server-statistics.png new file mode 100644 index 0000000000..6f18b33c1a Binary files /dev/null and b/source/_static/images/management-server-statistics.png differ diff --git a/source/_static/images/management-servers-list.png b/source/_static/images/management-servers-list.png new file mode 100644 index 0000000000..57d7a97ca9 Binary files /dev/null and b/source/_static/images/management-servers-list.png differ diff --git a/source/_static/images/nsx-phy-networks.png b/source/_static/images/nsx-phy-networks.png new file mode 100644 index 0000000000..d372f6307a Binary files /dev/null and b/source/_static/images/nsx-phy-networks.png differ diff --git a/source/_static/images/nsx-provider.png b/source/_static/images/nsx-provider.png new file mode 100644 index 0000000000..8002d9c988 Binary files /dev/null and b/source/_static/images/nsx-provider.png differ diff --git a/source/_static/images/nsx-public-traffic.png b/source/_static/images/nsx-public-traffic.png new file mode 100644 index 0000000000..77e16931eb Binary files /dev/null and b/source/_static/images/nsx-public-traffic.png differ diff --git 
a/source/_static/images/reset-password.png b/source/_static/images/reset-password.png new file mode 100644 index 0000000000..61f2f49b87 Binary files /dev/null and b/source/_static/images/reset-password.png differ diff --git a/source/_static/images/restart-sharedfs.png b/source/_static/images/restart-sharedfs.png new file mode 100644 index 0000000000..84981f330d Binary files /dev/null and b/source/_static/images/restart-sharedfs.png differ diff --git a/source/_static/images/routed-add-network-cidrsize.png b/source/_static/images/routed-add-network-cidrsize.png new file mode 100644 index 0000000000..2782ab1683 Binary files /dev/null and b/source/_static/images/routed-add-network-cidrsize.png differ diff --git a/source/_static/images/routed-add-network-offering.png b/source/_static/images/routed-add-network-offering.png new file mode 100644 index 0000000000..6a27164909 Binary files /dev/null and b/source/_static/images/routed-add-network-offering.png differ diff --git a/source/_static/images/routed-add-vpc-offering.png b/source/_static/images/routed-add-vpc-offering.png new file mode 100644 index 0000000000..120d36151c Binary files /dev/null and b/source/_static/images/routed-add-vpc-offering.png differ diff --git a/source/_static/images/routed-ipv4-routes.png b/source/_static/images/routed-ipv4-routes.png new file mode 100644 index 0000000000..bcb4b3c61e Binary files /dev/null and b/source/_static/images/routed-ipv4-routes.png differ diff --git a/source/_static/images/routed-ipv4-routing-firewall.png b/source/_static/images/routed-ipv4-routing-firewall.png new file mode 100644 index 0000000000..30be661ea6 Binary files /dev/null and b/source/_static/images/routed-ipv4-routing-firewall.png differ diff --git a/source/_static/images/sharedfs-access-tab.png b/source/_static/images/sharedfs-access-tab.png new file mode 100644 index 0000000000..97d9b088f3 Binary files /dev/null and b/source/_static/images/sharedfs-access-tab.png differ diff --git 
a/source/_static/images/template-upload-from-local.png b/source/_static/images/template-upload-from-local.png index e147848b0f..d1632ea6d0 100644 Binary files a/source/_static/images/template-upload-from-local.png and b/source/_static/images/template-upload-from-local.png differ diff --git a/source/_static/images/webhook-deliveries.png b/source/_static/images/webhook-deliveries.png new file mode 100644 index 0000000000..bb49ffb20d Binary files /dev/null and b/source/_static/images/webhook-deliveries.png differ diff --git a/source/_static/images/webhooks.png b/source/_static/images/webhooks.png new file mode 100644 index 0000000000..b8e9ed5c31 Binary files /dev/null and b/source/_static/images/webhooks.png differ diff --git a/source/_static/images/zone-capacities.png b/source/_static/images/zone-capacities.png new file mode 100644 index 0000000000..f218a40681 Binary files /dev/null and b/source/_static/images/zone-capacities.png differ diff --git a/source/adminguide/accounts.rst b/source/adminguide/accounts.rst index cbd705daaa..c66e0f0438 100644 --- a/source/adminguide/accounts.rst +++ b/source/adminguide/accounts.rst @@ -632,7 +632,7 @@ Using OAuth2 Authentication For Users OAuth2, the industry-standard authorization or authentication framework, simplifies the process of granting access to resources. CloudStack supports OAuth2 authentication wherein users can login into -CloudStack without using username and password. CloudStack currently supports Google and Github providers. +CloudStack without using username and password. CloudStack currently supports Google and GitHub providers. Other OAuth2 providers can be easily integrated with CloudStack using its plugin framework. For admins, the following are the settings available at global level to configure OAuth2. 
@@ -671,12 +671,12 @@ To register the OAuth provider client ID, redirect URI, secret key have to provi OAuth 2.0 has to be first configured in the corresponding provider to obtain the client ID, redirect URI, secret Key. For Google, please follow the instructions mentioned here `"Setting up OAuth 2.0 in Google" `_. -For Github, please follow the instructions mentioned here `"Setting up OAuth 2.0 in Github" `_. +For GitHub, please follow the instructions mentioned here `"Setting up OAuth 2.0 in GitHub" `_. In any OAuth 2.0 configuration admin has to use the redirect URI "http://:/#/verifyOauth" .. Note:: [Google OAuth 2.0 redirect URI] : - Google OAuth 2.0 configuration wont accept '#' in the URI, please use "http://:/?verifyOauth" + Google OAuth 2.0 configuration won't accept '#' in the URI, please use "http://:/?verifyOauth" Google does not accept direct IP address in the redirect URI, it must be a domain. As a workaround one can add the management server IP to host table in the local system and assign a domain, something like "management.cloud". In that redirect URI looks like "http://management.cloud:8080/?verifyOauth" @@ -807,4 +807,153 @@ The admin can also disable 2FA for a User using the action button as shown below If the admin themself loses the authenticator application or forgets the static PIN, then the admin will have to either use apikey to disable 2FA using the API setupUserTwoFactorAuthentication with enable flag to false or to do the database changes in 'user' table by clearing the columns - 'is_user_2fa_enabled', 'key_for_2fa', 'user_2fa_provider' for the specific entry. \ No newline at end of file + 'is_user_2fa_enabled', 'key_for_2fa', 'user_2fa_provider' for the specific entry. + +Password Recovery for Users (Forgot Password) +--------------------------------------------- + +CloudStack supports password recovery using email. To enable this feature, +set global setting `user.password.reset.enabled` to `true`. 
The following +global settings are available to configure SMTP for password recovery. + + +.. list-table:: Password Recovery Global Settings + :header-rows: 1 + + * - Global setting + - Default + - Description + * - ``user.password.reset.enabled`` + - `false` + - Determines whether password recovery via email is enabled or not. + * - ``user.password.reset.ttl`` + - `30` + - TTL in minutes for the token generated to reset the ACS user's password. + * - ``user.password.reset.email.sender`` + - `null` + - Sender for emails sent to the user to reset ACS user's password + * - ``user.password.reset.smtp.host`` + - `null` + - Host for SMTP server + * - ``user.password.reset.smtp.port`` + - `25` + - Port for SMTP server + * - ``user.password.reset.smtp.useAuth`` + - `false` + - Use auth in the SMTP server + * - ``user.password.reset.smtp.username`` + - `null` + - Username for SMTP server + * - ``user.password.reset.smtp.password`` + - `null` + - Password for SMTP Server + * - ``user.password.reset.mail.template`` + - `Hello {{username}}!` + + `You have requested to reset your password. Please click the following link to reset your password:`` + + `http://{{{resetLink}}}` + + `If you did not request a password reset, please ignore this email.` + + + `Regards,` + + `The CloudStack Team` + - Template of mail sent to the user to reset ACS user's password. This uses + mustache template engine. Available variables are: `username`, + `firstName`, `lastName`, `resetLink`, `token`. + + +Once the global settings are configured, follow the below steps to reset the +password for a user: + +#. Open the "Forgot Password" link on the login page. + + .. figure:: /_static/images/default-login.png + :align: center + +#. Enter your username and domain name and click on "Submit". + + .. figure:: /_static/images/forgot-password.png + :align: center + +#. An email will be sent to the User with a link to reset the password. + +#. Open the link in the email and set the new password. + + .. 
figure:: /_static/images/reset-password.png + :align: center + +Using API Key and Secret Key based Authentication +------------------------------------------------- +Users can generate API key and Secret key to directly access CloudStack APIs. +This authentication method is used for programmatically calling CloudStack APIs and thus helps in automation. +The API key uniquely identifies the Account, while the Secret key is used to generate a secure signature. +When making an API call, the API key and signature are included along with the command and other parameters, +and sent to the CloudStack API endpoint. For detailed information, refer to the CloudStack's Programmer Guide. + +Disabling Api Key and Secret Key based Access +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Root Administrators may choose to Disable Api key based access for certain Users, Accounts or Domains. +Or the Administrator may choose to Disable Api Key based access globally and allow only for certain users. +This could be particularly useful in cases where external authorization mechanisms like LDAP, SAML or OAuth2 are used, +as then Api key based authorization is the only means for automation. +This gives control to the Admin over who is allowed to run automation. + +Api key based access is enabled by default but it can be disabled (or enabled) at different granularities: + +1. Users + +Setting for a User can be changed through the Api Key Access field in the Edit User form, visible only to the Root Administrator. +Three values are possible: Disable, Enable and Inherit. Inherit means that the User will inherit whatever value is set for the Account. + + .. figure:: /_static/images/edit-user-api-key-access.png + :align: center + +Admins can also search for Users having the required Api key access value using the User list view search filter. + + .. figure:: /_static/images/filter-user-api-key-access.png + :align: center + +2. 
Accounts + +Similar to Users, Api Key Access field is present in the Edit Account Form and the Account list view search filter, only for the Root Administrator. +If the value is set to Inherit, it means that Account will inherit whatever value is set for the Domain. + +3. Domains + +Api Key Access at Domain level is controlled by the Domain level setting "api.key.access". If the Domain level +configuration is not set, then similar to other configurations it will consult the global value. + +4. Global + +The global value of the configuration setting "api.key.access" is set to 'True' by default. So Api Key Access at +all levels is enabled by default. If the global value is changed to 'False' without setting any of the lower levels, +then Api Key Access will be disabled for all Users. + +Order of Precedence +^^^^^^^^^^^^^^^^^^^ +The local value always takes precedence over the global value. So if Api key access is disabled for a User but +enabled for an Account, the User authorisation will still fail. Only if the User's Api key access is set to +'Inherit', the Account's Api Key Access value is considered. +Similarly if Account's Api Key Access is set to 'Inherit', only then the Domain level setting is considered, +And only if the Domain level configuration is not set, the Global configuration is considered. + +Examples +^^^^^^^^ + +#. Disallow Api key access for all Accounts and Users in a Domain. + + #. Leave all User and Account level Api Key Access values to the default 'Inherit'. + #. Set the Domain level setting "api.key.access" to False only for the required domain. + +#. Disallow Api key access for some Users, but allowed globally. + + #. Set the User level permission to ‘Disabled’ only for the required Users. + #. All upper level permissions should either be Inherit or Enabled. + +#. Allow Api key access to some Users, but disallowed globally. + + #. Set User level permission to ‘Enabled’ only for the required Users. + #. 
All upper level permissions should either be Inherit or Disabled. diff --git a/source/adminguide/api.rst b/source/adminguide/api.rst index 2f65f4f9eb..53515d6456 100644 --- a/source/adminguide/api.rst +++ b/source/adminguide/api.rst @@ -64,12 +64,16 @@ the user data: #. Run the following command to find the virtual router. .. code:: bash + # cat /var/lib/dhclient/dhclient-eth0.leases | grep dhcp-server-identifier | tail -1 + #. Access user data by running the following command using the result of the above command .. code:: bash + # curl http://10.1.1.1/latest/user-data + Meta Data can be accessed similarly, using a URL of the form http://10.1.1.1/latest/meta-data/{metadata type}. (For backwards compatibility, the previous URL http://10.1.1.1/latest/{metadata type} @@ -169,7 +173,7 @@ VMdata - a list of String arrays representing [“directory”, “filename”, - meta_data.json - - Network_data.json + - network_data.json - label, which is configurable in global settings: diff --git a/source/adminguide/arch_types.rst b/source/adminguide/arch_types.rst new file mode 100644 index 0000000000..eea29adec8 --- /dev/null +++ b/source/adminguide/arch_types.rst @@ -0,0 +1,30 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information# + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. 
+ + +Hosts/Cluster Arch Types Allocation +=================================== + +Since CloudStack 4.20.0, it is possible to add AMD 64 bits and ARM 64 bits clusters (and hosts). A single zone can contain clusters (and hosts) of different arch types (multi-arch zones). + +When a multi-arch zone is selected for VM deployment, CloudStack allows the users to filter the templates/ISOs by their arch type. + +|deploy-vm-arch-types.png| + +Once a template/ISO is selected, only the clusters (and hosts) matching the arch type will be considered for the VM allocation + +.. |deploy-vm-arch-types.png| image:: /_static/images/deploy-vm-arch-types.png + :alt: Filtering templates and ISOs by arch types + diff --git a/source/adminguide/autoscale_with_virtual_router.rst b/source/adminguide/autoscale_with_virtual_router.rst index b196866885..7e7be108f8 100644 --- a/source/adminguide/autoscale_with_virtual_router.rst +++ b/source/adminguide/autoscale_with_virtual_router.rst @@ -14,7 +14,7 @@ under the License. -Configuring AutoScale with using CloudStack Virtual Router +Configuring AutoScale with the CloudStack Virtual Router ============================================= diff --git a/source/adminguide/backup_and_recovery.rst b/source/adminguide/backup_and_recovery.rst index 2fc82d19da..eb926a345a 100644 --- a/source/adminguide/backup_and_recovery.rst +++ b/source/adminguide/backup_and_recovery.rst @@ -28,13 +28,17 @@ The following providers are currently supported: - VMware with Veeam Backup and Recovery - KVM with DELL EMC Networker +- KVM with NAS B&R Plugin (4.20 onwards) See the Veeam Backup and Recovery plugin documentation for plugin specific information. -:ref:`Veeam Backup and Recovery Plugin` +:ref:`Veeam Backup and Replication Plugin` See the DELL EMC Networker Backup and Recovery plugin documentation for plugin specific information. :ref:`DELL EMC Networker Backup and Recovery Plugin` +See the NAS Backup and Recovery plugin documentation for plugin specific information. 
+:ref:`NAS Backup and Recovery Plugin` + Backup and Recovery Concepts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -73,7 +77,7 @@ the Global Settings area of the CloudStack UI. Configuration Description ================================= ======================== backup.framework.enabled Setting to enable or disable the feature. Default: false. -backup.framework.provider.plugin The backup provider (plugin) name. For example: 'dummy', 'veeam' and 'networker'. This is a zone specific setting. Default: dummy. +backup.framework.provider.plugin The backup provider (plugin) name. For example: 'dummy', 'veeam', 'networker' and 'nas'. This is a zone specific setting. Default: dummy. backup.framework.sync.interval Background sync task internal in seconds that performs metrics/usage stats collection, backup reconciliation and backup scheduling. Default: 300. ================================= ======================== @@ -88,7 +92,7 @@ Backup Offerings ------------------ Admins can import an external provider's backup offerings using UI or API for a -particular zone, as well as manage a backup offering's lifecyle. Admins can also +particular zone, as well as manage a backup offering's lifecycle. Admins can also specify if a backup offering allows user-defined backup schedules and ad-hoc backups. Users can list and consume the imported backup offerings, only root admins can import or delete offerings. diff --git a/source/adminguide/events.rst b/source/adminguide/events.rst index 7766973507..fcb06047ec 100644 --- a/source/adminguide/events.rst +++ b/source/adminguide/events.rst @@ -73,6 +73,13 @@ in the AMQP server. Additionally, both an in-memory implementation and an Apache Kafka implementation are also available. + +.. 
note:: + On upgrading from 4.19.x or lower, existing AMQP or Kafka integration + configurations should be moved from folder + ``/etc/cloudstack/management/META-INF/cloudstack/core`` to + ``/etc/cloudstack/management/META-INF/cloudstack/event`` + Use Cases ~~~~~~~~~ @@ -101,7 +108,7 @@ As a CloudStack administrator, perform the following one-time configuration to enable event notification framework. At run time no changes can control the behaviour. -#. Create the folder ``/etc/cloudstack/management/META-INF/cloudstack/core`` +#. Create the folder ``/etc/cloudstack/management/META-INF/cloudstack/event`` #. Inside that folder, open ``spring-event-bus-context.xml``. @@ -203,6 +210,23 @@ changes can control the behaviour. #. Restart the Management Server. +#. CloudStack creates the exchange ‘cloudstack-events’ which will receive messages containing CloudStack events; however, no queues will be created. + + To create a queue and bind with cloudstack-events the following steps are needed: + + - Go to Queues tab and add a queue, e.g. 'cloudstack-queue’ + - Go to Exchanges tab and Bind to queue cloudstack-queue with the desired ‘Routing key’. + + +#. Routing keys + + The routing key is a list of words, delimited by a period ("."). CloudStack builds routing keys according to each event type, some examples are: + + Some examples of routing keys that match CloudStack events: + - A pound symbol (“#”) indicates a match on zero or more words; thus, it will match any possible set of words; + - Asterisk (“*”) matching any word and the period (“.”) delimiting example '\*.*.*.*.*' + + Kafka Configuration ~~~~~~~~~~~~~~~~~~~ @@ -214,9 +238,22 @@ changes can control the behaviour. which contains valid kafka configuration properties as documented in http://kafka.apache.org/documentation.html#newproducerconfigs The properties may contain an additional ``topic`` property which if not provided will default to ``cloudstack``. 
While ``key.serializer`` and ``value.serializer`` are usually required for a producer to correctly start, they may be omitted and - will default to ``org.apache.kafka.common.serialization.StringSerializer``. + will default to ``org.apache.kafka.common.serialization.StringSerializer``. A sample example which will be used by cloudstack for exporting of events -#. Create the folder ``/etc/cloudstack/management/META-INF/cloudstack/core`` + .. parsed-literal:: + + cat /etc/cloudstack/management/kafka.producer.properties + + bootstrap.servers=:9092 + acks=all + topic=cs + retries=1 + + + + + +#. Create the folder ``/etc/cloudstack/management/META-INF/cloudstack/event`` #. Inside that folder, open ``spring-event-bus-context.xml``. @@ -366,3 +403,9 @@ Procedure date. #. Click OK. + + +Webhooks +-------- + +.. include:: events/webhooks.rst diff --git a/source/adminguide/events/webhooks.rst b/source/adminguide/events/webhooks.rst new file mode 100644 index 0000000000..e9705ea613 --- /dev/null +++ b/source/adminguide/events/webhooks.rst @@ -0,0 +1,199 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information# + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +Webhooks allow external services to be notified when certain events happen. 
+CloudStack allows provisioning webhooks for all account roles and for various +scopes. +This allows users to consume event notifications without any external services +such as an event streaming platform. + +Webhooks can be managed using both API and UI. CloudStack provides the following +APIs for webhooks: + + .. cssclass:: table-striped table-bordered table-hover + + ====================== =========================== + API Description + ====================== =========================== + createWebhook Creates a Webhook + listWebhooks Lists Webhooks + updateWebhook Updates a Webhook + deleteWebhook Deletes a Webhook + listWebhookDeliveries Lists Webhook deliveries + deleteWebhookDelivery Deletes Webhook delivery(s) + executeWebhookDelivery Executes a Webhook delivery + ====================== =========================== + +In the UI, webhooks can be managed under *Tools > Webhooks* menu. + + |webhooks.png| + + +Creating a webhook +~~~~~~~~~~~~~~~~~~ + +Any CloudStack user having createWebhook API access can create a new webhook +for the event notifications. + +To create a webhook: + +#. Log in to the CloudStack UI. + +#. In the left navigation bar, click Tools and choose Webhooks. + +#. Click Create Webhook. + +#. In the dialog, make the following choices: + + - **Name**. Any desired name for the webhook. + + - **Description**. A short description of the webhook. + + - **Scope**. (Available only for ROOT admins or domain admins). Scope + of the webhook. The value can be Local, Domain or Global. + Local - only events associated with the owner account will be notified. + Domain - events associated with domain will be notified. + Global - all events will be notified. This is available only for ROOT + admin account. + For a normal user account, webhooks can be created with Local scope + only. + + - **Domain**. An optional domain for the Webhook. If the account parameter + is used, domain must also be used. + + - **Account**. An optional account for the webhook. 
Must be used with + domain. + + - **Payload URL**. The payload URL of the Webhook. All events for the + webhook will posted on this URL. + + - **SSL Verification**. An optional parameter to specify whether the HTTP + POST requests for event notifications must be sent with strict SSL + verification request when a HTTPS payload URL is used. + + - **Secret Key**. An option secret key parameter which can be used to sign + the HTTP POST requests for event notifications with HMAC. + + - **Enabled**. To specify whether the webhook be created with enabled or + disabled state + + |create-webhook.png| + + +Working with webhook deliveries +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +CloudStack attempts webhook deliveries using a thread pool with given retries. +The following global configuration can be used to configure thread pool size +for deliveries: + + - **webhook.delivery.thread.pool.size**: Size of the thread pool for webhook + deliveries. + + +Also, the number attempts for a particular event notification and the timeout +for one particular attempt can be configured using the following domain-level +configurations: + + - **webhook.delivery.retries**: Number of tries to be made for a webhook + delivery. + + - **webhook.delivery.timeout**: Wait timeout (in seconds) for a webhook + delivery attempt. + +.. note:: + The onus of dealing with the duplicate event deliveries lies with the payload + server or application. During delivery, when the server doesn't respond in a + timely manner or returns a failure CloudStack will re-attempt the delivery of + the event, based on the above global settings, irrespective of the fact whether + the server already received the event in any previous attempts. + + +CloudStack allows retrieving recent deliveries for a webhook with details such +as event, headers, payload, response, success, duration, etc. +In the UI, these can be accessed under Recent deliveries tab in the Webhook +detail view. +The user can redeliver an existing delivery. 
To check the working of the +webhook consumer test deliveries can made. Test deliveries are not recorded +by CloudStack. + + |webhook-deliveries.png| + +The administrator can configure storage of webhook deliveries using the +following global configurations: + + - **webhook.deliveries.limit**: Limit for number of deliveries to keep + in DB per webhook. Default value is 10. + + - **webhook.deliveries.cleanup.interval**: Interval (in seconds) for + cleaning up webhook deliveries. Default value is 3600 or 1 hour. + + - **webhook.deliveries.cleanup.initial.delay**: Initial delay (in seconds) + for webhook deliveries cleanup task. Default value is 180. + +Based on the above configurations CloudStack will purge older deliveries in +the database using a repeatedly running task. + +For a webhook delivery, CloudStack sends a HTTP POST request with event data +as the payload. The following custom headers are sent with the request: + + - **X-CS-Event-ID**. Event ID for which the webhook delivery is made. + + - **X-CS-Event**. Event for for which the webhook delivery is made. + + - **User-Agent**. In the format - *CS-Hookshot/*. Here + ACCOUNT_ID is the ID of the account which triggered the event. + + - **X-CS-Signature**. HMAC SHA256 signature created using the webhook + secret key and the delivery payload. It is sent only when secret key + is specified for the webhook. + + +Working with HTTPS webhook payload URL with self-signed certificate +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Generate a self signed certificate for the server, make sure to mention the IP address of the server when it prompts. + + .. parsed-literal:: + + openssl req -x509 -newkey rsa:4096 -nodes -out cert.pem -keyout key.pem -days 365 + +#. Copy the generated cert.pem to the management server(s). + +#. Import the certificate for JDK on the management server(s) + + .. 
parsed-literal:: + + cp /etc/java/java-17-openjdk/java-17-openjdk-17.0.10.0.7-2.0.1.el8.x86_64/lib/security/cacerts /etc/java/java-17-openjdk/java-17-openjdk-17.0.10.0.7-2.0.1.el8.x86_64/lib/security/jssecacerts + + keytool -importcert -file /root/kiran/cert.pem -alias webhook -keystore /etc/java/java-17-openjdk/java-17-openjdk-17.0.10.0.7-2.0.1.el8.x86_64/lib/security/jssecacerts -storepass changeit + +4. Test the webhook. + + + +.. Images + + +.. |webhooks.png| image:: /_static/images/webhooks.png +.. |create-webhook.png| image:: /_static/images/create-webhook.png +.. |webhook-deliveries.png| image:: /_static/images/webhook-deliveries.png + + + + + + diff --git a/source/adminguide/host_and_storage_tags.rst b/source/adminguide/host_and_storage_tags.rst index 1f909f801e..984120f120 100644 --- a/source/adminguide/host_and_storage_tags.rst +++ b/source/adminguide/host_and_storage_tags.rst @@ -23,32 +23,81 @@ Host tags --------- Host tags are responsible for directing VMs to compatible hosts. They are validated with the host tags informed in the compute offerings or in the system offerings. +There are two types of host tags: + +- (Explicit) host tags: the host tags are managed by CloudStack, including the flexible host tags. Cloud operator can set, update, and delete host tags via CloudStack API or GUI. +- Implicit host tags: the host tags are not managed by CloudStack API. For more information, see section `“Implicit host tags” `_. + To explain the behavior of host tags, some examples will be demonstrated with two hosts (Host1 and Host2): #. Tag setup: + * Host1: h1 * Host2: h2 * Offering: h1 + When a VM is created with the offering, the deployment will be carried out on Host1, as it is the one that has the tag compatible with the offering. #. Tag setup: + * Host1: h1 * Host2: h2,h3 * Offering: h3 + Hosts and offerings accept a list of tags, with comma (,) being their separator. So in this example, Host2 has the h2 and h3 tags. 
When a VM is created with the offering, the deployment will be carried out on Host2, as it is the one that has the tag compatible with the offering. #. Tag setup: + * Host1: h1 * Host2: h2,h3 * Offering: (no tag) + When the offering does not have tags, it will be possible to deploy the VM on any host. #. Tag setup: + * Host1: (no tag) * Host2: h2 * Offering: h3 + None of the hosts have compatible tags and it will not be possible to deploy a VM with the offering. However, CloudStack ignores this behavior when a host is manually selected. +.. _strict-host-tags: + +Strict Host Tags +----------------- +During certain operations, such as changing the compute offering or starting or +live migrating an instance to a specific host, CloudStack may ignore the host +tags. This behavior is intentional and is designed to provide flexibility in +resource allocation. However, in some cases, this can lead to instances being +deployed on undesired hosts. + +To address this, CloudStack introduces an add-on feature that allows administrators +to enforce tag checks during these operations. By specifying the required tags +in the global configuration `vm.strict.host.tags`, CloudStack will ensure that +the specified tags must match during the operations. If any of the specified +tags do not match, the operation will fail. + +If `resource.limit.host.tags` are defined and +`vm.strict.resource.limit.host.tag.check` is set to true, the tags defined in +`resource.limit.host.tags` are included with the `vm.strict.host.tags`. + +.. list-table:: Strict host tags related global settings + :header-rows: 1 + + * - Parameter + - Default + - Description + * - ``vm.strict.host.tags`` + - empty + - A comma-separated list of tags which must match during operations like + modifying the compute offering for an instance, and starting or live + migrating an instance to a specific host. 
+ * - ``vm.strict.resource.limit.host.tag.check`` + - `true` + - If set to true, tags specified in `resource.limit.host.tags` are also + included in `vm.strict.host.tags`. + Storage tags ------------ Storage tags are responsible for directing volumes to compatible primary storage. They are validated with the storage tags entered in the disk offerings or system offerings. @@ -56,29 +105,37 @@ Storage tags are responsible for directing volumes to compatible primary storage To explain the behavior of storage tags, some examples will be demonstrated: #. Tag setup: + * Storage: A * Offering: A,B + Storage and offering accept a list of tags, with the comma (,) being their separator. Therefore, in this example, the offering has tags A and B. In this example, it will not be possible to allocate the volume, as all the offering tags must exist in the storage. Although the storage has the A tag, it does not have the B tag. #. Tag setup: + * Storage: A,B,C,D,X * Offering: A,B,C + In this example, it will be possible to allocate the volume, as all the offering tags exist in the storage. #. Tag setup: + * Storage: A, B, C * Offering: (no tag) + In this example, it will be possible to allocate the volume, as the offering does not have any tag requirements. #. Tag setup: + * Storage: (no tag) * Offering: D,E + In this example, it will not be possible to allocate the volume, as the storage does not have tags, therefore it does not meet the offering requirements. In short, if the offering has tags, the storage will need to have all the tags for the volume to be allocated. If the offering does not have tags, the volume can be allocated, regardless of whether the storage has a tag or not. Flexible Tags --------------- +------------- When defining tags for a resource (a host, for example), offerings with those tags will be directed to that resource. However, offerings without tags can also be targeted to it. 
So, even after adding tags to a resource with the intention of making it exclusive to certain types of offerings, this exclusivity can be ignored. Furthermore, the standard tag system only allows the user to enter a simple list of tags, without the possibility of creating more complex rules, such as checking whether the offering has certain pairs of tags. @@ -90,3 +147,33 @@ Configuring flexible tags on hosts is carried out through the ``updateHost`` API It is worth mentioning that the compute offering or disk offering tags are injected in list format. Thus, when validating an offering with tags ``A, B``, during processing, there will be the variable ``tags``, where ``tags[0]`` will be tag A, and ``tags[1]`` will be tag B. It's also important to mention that flexible tags are not compatible with quota's activation rules. + +Implicit Host Tags +------------------ +In Apache CloudStack 4.19 and prior, cloud operators are only able to set tags of host via Cloudstack API or on CloudStack GUI. + +Implicit host tags feature is supported since Apache CloudStack 4.20. With the feature, Cloud operators can easily set the +implicit host tags per host based on the server configurations. For example, based on the following hardware devices and +software which can be fetched by commands, scripts or tools: + +- CPU architecture and model +- Network card type and speed +- Hard disk type and raid type +- GPU model +- OS distribution and version + +To set it, please add the following line to /etc/cloudstack/agent/agent.properties and restart cloudstack-agent. + +.. parsed-literal:: + host.tags= + +Cloud operators can also get the information and set the implicit host tags by automation tools (chef, ansible, puppet, etc). + +.. note:: + - Implicit host tags are only configurable on KVM hosts. They are not managed by CloudStack API. + + - Implicit host tags are not compatible with flexible host tags. + + - Flexible host tags and host tags managed by CloudStack API are explicit tags. 
+ + - Explicit and implicit host tags have no difference in VM instance deployment and migration. diff --git a/source/adminguide/hosts.rst b/source/adminguide/hosts.rst index fee3cd32af..9afdb50297 100644 --- a/source/adminguide/hosts.rst +++ b/source/adminguide/hosts.rst @@ -164,7 +164,7 @@ migrated to other Hosts. To remove a Host from the cloud: #. Use the UI option to remove the node. - Then you may power down the Host, re-use its IP address, re-install + Then you may power down the Host, reuse its IP address, re-install it, etc @@ -472,7 +472,7 @@ To change the over-provisioning factors for an existing cluster: #. Fill in your desired over-provisioning multipliers in the fields CPU overcommit factor and RAM overcommit factor. The value which is - intially shown in these fields is the default value inherited from + initially shown in these fields is the default value inherited from the global configuration settings. .. note:: @@ -595,7 +595,7 @@ The former behaviour also is supported — VLAN is randomly allocated to a network from the VNET range of the physical network when the network turns to Implemented state. The VLAN is released back to the VNET pool when the network shuts down as a part of the Network Garbage Collection. -The VLAN can be re-used either by the same network when it is +The VLAN can be reused either by the same network when it is implemented again, or by any other network. On each subsequent implementation of a network, a new VLAN can be assigned. @@ -676,7 +676,7 @@ management server(s). The ``outofbandmanagement.sync.poolsize`` is the maximum number of ipmitool background power state scanners that can run at a time. Based on the maximum number of hosts you've, you can increase/decrease the value depending on how much -stress your management server host can endure. It will take atmost number of +stress your management server host can endure. 
It will take at most number of total out-of-band-management enabled hosts in a round * ``outofbandmanagement.action.timeout`` / ``outofbandmanagement.sync.poolsize`` seconds to complete a background power-state sync scan in a single round. @@ -703,7 +703,7 @@ power management actions but in the UI a warning is displayed. Security -------- -Starting 4.11, CloudStack has an inbuilt certicate authority (CA) framework and +Starting 4.11, CloudStack has an inbuilt certificate authority (CA) framework and a default 'root' CA provider which acts as a self-signed CA. The CA framework participates in certificate issuance, renewal, revocation, and propagation of certificates during setup of a host. This framework is primary used to diff --git a/source/adminguide/index.rst b/source/adminguide/index.rst index f597cdda80..720e0cda60 100644 --- a/source/adminguide/index.rst +++ b/source/adminguide/index.rst @@ -139,6 +139,7 @@ Managing VM and Volume Allocation :maxdepth: 4 host_and_storage_tags + arch_types Managing Networks and Traffic ----------------------------- diff --git a/source/adminguide/locale/pot/hosts.pot b/source/adminguide/locale/pot/hosts.pot index 5ef7fbe455..1678b60fab 100644 --- a/source/adminguide/locale/pot/hosts.pot +++ b/source/adminguide/locale/pot/hosts.pot @@ -262,7 +262,7 @@ msgstr "" #: ../../hosts.rst:171 # 400a182ceace4cef87ffe6c731ea45cd -msgid "Then you may power down the Host, re-use its IP address, re-install it, etc" +msgid "Then you may power down the Host, reuse its IP address, re-install it, etc" msgstr "" #: ../../hosts.rst:176 @@ -545,7 +545,7 @@ msgstr "" #: ../../hosts.rst:410 # 4574765089c64df0a53ffd4b0d9052a0 -msgid "Fill in your desired over-provisioning multipliers in the fields CPU overcommit factor and RAM overcommit factor. The value which is intially shown in these fields is the default value inherited from the global configuration settings." 
+msgid "Fill in your desired over-provisioning multipliers in the fields CPU overcommit factor and RAM overcommit factor. The value which is initially shown in these fields is the default value inherited from the global configuration settings." msgstr "" #: ../../hosts.rst:421 @@ -772,7 +772,7 @@ msgstr "" #: ../../hosts.rst:530 # 47af367fd0e74e9c98c07d5fd93d9a6a -msgid "The former behaviour also is supported — VLAN is randomly allocated to a network from the VNET range of the physical network when the network turns to Implemented state. The VLAN is released back to the VNET pool when the network shuts down as a part of the Network Garbage Collection. The VLAN can be re-used either by the same network when it is implemented again, or by any other network. On each subsequent implementation of a network, a new VLAN can be assigned." +msgid "The former behaviour also is supported — VLAN is randomly allocated to a network from the VNET range of the physical network when the network turns to Implemented state. The VLAN is released back to the VNET pool when the network shuts down as a part of the Network Garbage Collection. The VLAN can be reused either by the same network when it is implemented again, or by any other network. On each subsequent implementation of a network, a new VLAN can be assigned." msgstr "" #: ../../hosts.rst:538 diff --git a/source/adminguide/locale/pot/management.pot b/source/adminguide/locale/pot/management.pot index cf7bfff0a7..04596b1bc7 100644 --- a/source/adminguide/locale/pot/management.pot +++ b/source/adminguide/locale/pot/management.pot @@ -298,7 +298,7 @@ msgstr "" #: ../../management.rst:345 # 653fb8fc18ea4f17ab01fe630ed6783b -msgid "CloudStack generates a syslog message for every alert. Each syslog message incudes the fields alertType, message, podId, dataCenterId, and clusterId, in the following format. If any field does not have a valid value, it will not be included." +msgid "CloudStack generates a syslog message for every alert. 
Each syslog message includes the fields alertType, message, podId, dataCenterId, and clusterId, in the following format. If any field does not have a valid value, it will not be included." msgstr "" #: ../../management.rst:354 diff --git a/source/adminguide/locale/pot/networking/inter_vlan_routing.pot b/source/adminguide/locale/pot/networking/inter_vlan_routing.pot index d7b6b8009d..bc8643d755 100644 --- a/source/adminguide/locale/pot/networking/inter_vlan_routing.pot +++ b/source/adminguide/locale/pot/networking/inter_vlan_routing.pot @@ -38,7 +38,7 @@ msgstr "" #: ../../networking/inter_vlan_routing.rst:37 # dca4c4be393c4d7bbcda75f49ffc8efc -msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly alloted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." +msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly allotted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." msgstr "" #: ../../networking/inter_vlan_routing.rst:43 diff --git a/source/adminguide/locale/pot/networking/ip_reservation_in_guest_networks.pot b/source/adminguide/locale/pot/networking/ip_reservation_in_guest_networks.pot index 4d28e100e1..315d15163e 100644 --- a/source/adminguide/locale/pot/networking/ip_reservation_in_guest_networks.pot +++ b/source/adminguide/locale/pot/networking/ip_reservation_in_guest_networks.pot @@ -68,7 +68,7 @@ msgstr "" #: ../../networking/ip_reservation_in_guest_networks.rst:60 # 0710411bb18c4764970dffcc49fe5dab -msgid "You cannot apply IP Reservation if any VM is alloted with an IP address that is outside the Guest VM CIDR." +msgid "You cannot apply IP Reservation if any VM is allotted with an IP address that is outside the Guest VM CIDR." 
msgstr "" #: ../../networking/ip_reservation_in_guest_networks.rst:63 diff --git a/source/adminguide/locale/pot/networking/virtual_private_cloud_config.pot b/source/adminguide/locale/pot/networking/virtual_private_cloud_config.pot index 9f29a3ac43..2bb1162f05 100644 --- a/source/adminguide/locale/pot/networking/virtual_private_cloud_config.pot +++ b/source/adminguide/locale/pot/networking/virtual_private_cloud_config.pot @@ -1547,7 +1547,7 @@ msgstr "" #: ../../networking/virtual_private_cloud_config.rst:1165 # 2e6d402ef3d044488597c6501ca6749d # f974a014675745ceae4ebb19113bc49e -msgid "Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses." +msgid "Navigate to Service Offerings and choose Network OfferingPublic IP Addresses." msgstr "" #: ../../networking/virtual_private_cloud_config.rst:961 diff --git a/source/adminguide/locale/pot/networking2.pot b/source/adminguide/locale/pot/networking2.pot index 14666aa3e7..8f769f4eea 100644 --- a/source/adminguide/locale/pot/networking2.pot +++ b/source/adminguide/locale/pot/networking2.pot @@ -970,7 +970,7 @@ msgstr "" #: ../../networking2.rst:637 # 7cd262eb2864435f953ba2f8d9b2e0a0 -msgid "You cannot apply IP Reservation if any VM is alloted with an IP address that is outside the Guest VM CIDR." +msgid "You cannot apply IP Reservation if any VM is allotted with an IP address that is outside the Guest VM CIDR." msgstr "" #: ../../networking2.rst:642 @@ -5097,7 +5097,7 @@ msgstr "" #: ../../networking2.rst:4764 # 9914db80271c45879b3763424ebbbe4e -msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly alloted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." +msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. 
A guest VLAN is randomly allotted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." msgstr "" #: ../../networking2.rst:4770 @@ -6272,7 +6272,7 @@ msgstr "" #: ../../networking2.rst:6482 # bfaeb8a949fb4d25a618540dcc365471 # 6c6e4ac1110442ba9ec325328e96bfb8 -msgid "Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses." +msgid "Navigate to Service Offerings and choose Network OfferingPublic IP Addresses." msgstr "" #: ../../networking2.rst:6185 diff --git a/source/adminguide/locale/pot/networking_and_traffic.pot b/source/adminguide/locale/pot/networking_and_traffic.pot index bad4a1a647..619619d8ee 100644 --- a/source/adminguide/locale/pot/networking_and_traffic.pot +++ b/source/adminguide/locale/pot/networking_and_traffic.pot @@ -970,7 +970,7 @@ msgstr "" #: ../../networking/ip_reservation_in_guest_networks.rst:60 # 0e5b7bff020d494b9f4e85c641380036 -msgid "You cannot apply IP Reservation if any VM is alloted with an IP address that is outside the Guest VM CIDR." +msgid "You cannot apply IP Reservation if any VM is allotted with an IP address that is outside the Guest VM CIDR." msgstr "" #: ../../networking/ip_reservation_in_guest_networks.rst:63 @@ -5088,7 +5088,7 @@ msgstr "" #: ../../networking/inter_vlan_routing.rst:37 # 3e6de8dbeba5419abdb2b03019116141 -msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly alloted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." +msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly allotted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." 
msgstr "" #: ../../networking/inter_vlan_routing.rst:43 @@ -6263,7 +6263,7 @@ msgstr "" #: ../../networking/virtual_private_cloud_config.rst:1165 # d6358f1cb80b45c6becf012d6670f0ff # 19877c93762c4d95b38bfafc90fc110c -msgid "Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses." +msgid "Navigate to Service Offerings and choose Network OfferingPublic IP Addresses." msgstr "" #: ../../networking/virtual_private_cloud_config.rst:963 diff --git a/source/adminguide/locale/pot/service_offerings.pot b/source/adminguide/locale/pot/service_offerings.pot index 6edb5a3e18..ed535bc4f2 100644 --- a/source/adminguide/locale/pot/service_offerings.pot +++ b/source/adminguide/locale/pot/service_offerings.pot @@ -416,7 +416,7 @@ msgstr "" #: ../../service_offerings.rst:304 # 665e3b5c270e42979195e2837591595b -msgid "Custom IOPS. If checked, the user can set their own IOPS. If not checked, the root administrator can define values. If the root admin does not set values when using storage QoS, default values are used (the defauls can be overridden if the proper parameters are passed into CloudStack when creating the primary storage in question)." +msgid "Custom IOPS. If checked, the user can set their own IOPS. If not checked, the root administrator can define values. If the root admin does not set values when using storage QoS, default values are used (the defaults can be overridden if the proper parameters are passed into CloudStack when creating the primary storage in question)." 
msgstr "" #: ../../service_offerings.rst:311 diff --git a/source/adminguide/locale/pot/systemvm.pot b/source/adminguide/locale/pot/systemvm.pot index 63cea3fd24..956c2fdabc 100644 --- a/source/adminguide/locale/pot/systemvm.pot +++ b/source/adminguide/locale/pot/systemvm.pot @@ -103,7 +103,7 @@ msgstr "" #: ../../systemvm.rst:66 # 6ea0f846b0a34711b7c0090cb48d8c32 -msgid "http://download.cloud.com/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-xen.vhd.bz2" +msgid "http://download.cloudstack.org/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-xen.vhd.bz2" msgstr "" #: ../../systemvm.rst:67 @@ -113,7 +113,7 @@ msgstr "" #: ../../systemvm.rst:67 # fe251bca2b854129890cba8e7ac9bbbf -msgid "http://download.cloud.com/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-kvm.qcow2.bz2" +msgid "http://download.cloudstack.org/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-kvm.qcow2.bz2" msgstr "" #: ../../systemvm.rst:70 diff --git a/source/adminguide/locale/pot/templates.pot b/source/adminguide/locale/pot/templates.pot index be9832fd02..e29f6e7beb 100644 --- a/source/adminguide/locale/pot/templates.pot +++ b/source/adminguide/locale/pot/templates.pot @@ -1160,7 +1160,7 @@ msgstr "" #: ../../templates.rst:1039 # 7dfc38f9a99c4d0bbb5007329350693e -msgid "`http://download.cloud.com/templates/4.2/bindir/cloud-set-guest-password.in `_" +msgid "`http://download.cloudstack.org/templates/4.2/bindir/cloud-set-guest-password.in `_" msgstr "" #: ../../templates.rst:1042 diff --git a/source/adminguide/locale/pot/troubleshooting.pot b/source/adminguide/locale/pot/troubleshooting.pot index 979a74e127..33da45dc8c 100644 --- a/source/adminguide/locale/pot/troubleshooting.pot +++ b/source/adminguide/locale/pot/troubleshooting.pot @@ -283,7 +283,7 @@ msgstr "" #: ../../troubleshooting.rst:244 # 5f383b9190f34ebcbdd6bb92b713ee21 -msgid "Below are a few troubleshooting steps to check whats going wrong with your network..." 
+msgid "Below are a few troubleshooting steps to check what's going wrong with your network..." msgstr "" #: ../../troubleshooting.rst:249 @@ -308,7 +308,7 @@ msgstr "" #: ../../troubleshooting.rst:271 # 417a511656394e62ab6533726322a54e -msgid "If the pings dont work, run *tcpdump(8)* all over the place to check who is gobbling up the packets. Ultimately, if the switches are not configured correctly, CloudStack networking wont work so fix the physical networking issues before you proceed to the next steps" +msgid "If the pings dont work, run *tcpdump(8)* all over the place to check who is gobbling up the packets. Ultimately, if the switches are not configured correctly, CloudStack networking won't work so fix the physical networking issues before you proceed to the next steps" msgstr "" #: ../../troubleshooting.rst:276 @@ -333,7 +333,7 @@ msgstr "" #: ../../troubleshooting.rst:321 # d7be5d89abc2416a81c2e11ae80e5c5e -msgid "KVM traffic labels require to be named as *\"cloudbr0\"*, *\"cloudbr2\"*, *\"cloudbrN\"* etc and the corresponding bridge must exist on the KVM hosts. If you create labels/bridges with any other names, CloudStack (atleast earlier versions did) seems to ignore them. CloudStack does not create the physical bridges on the KVM hosts, you need to create them **before** before adding the host to Cloudstack." +msgid "KVM traffic labels require to be named as *\"cloudbr0\"*, *\"cloudbr2\"*, *\"cloudbrN\"* etc and the corresponding bridge must exist on the KVM hosts. If you create labels/bridges with any other names, CloudStack (at least earlier versions did) seems to ignore them. CloudStack does not create the physical bridges on the KVM hosts, you need to create them **before** before adding the host to Cloudstack." msgstr "" #: ../../troubleshooting.rst:340 @@ -348,7 +348,7 @@ msgstr "" #: ../../troubleshooting.rst:385 # e75bf706d6a745c9a94ee34516e86d1f -msgid "The Internet would be accessible from both the SSVM and CPVM instances by default. 
Their public IPs will also be directly pingable from the Internet. Please note that these test would work only if your switches and traffic labels are configured correctly for your environment. If your SSVM/CPVM cant reach the Internet, its very unlikely that the Virtual Router (VR) can also the reach the Internet suggesting that its either a switching issue or incorrectly assigned traffic labels. Fix the SSVM/CPVM issues before you debug VR issues." +msgid "The Internet would be accessible from both the SSVM and CPVM instances by default. Their public IPs will also be directly pingable from the Internet. Please note that these test would work only if your switches and traffic labels are configured correctly for your environment. If your SSVM/CPVM can't reach the Internet, its very unlikely that the Virtual Router (VR) can also the reach the Internet suggesting that its either a switching issue or incorrectly assigned traffic labels. Fix the SSVM/CPVM issues before you debug VR issues." msgstr "" #: ../../troubleshooting.rst:417 @@ -358,12 +358,12 @@ msgstr "" #: ../../troubleshooting.rst:432 # fd961e75e43d4c48a4b779ef136e1d12 -msgid "However, the Virtual Router's (VR) Source NAT Public IP address **WONT** be reachable until appropriate Ingress rules are in place. You can add *Ingress* rules under *Network, Guest Network, IP Address, Firewall* setting page." +msgid "However, the Virtual Router's (VR) Source NAT Public IP address **WON'T** be reachable until appropriate Ingress rules are in place. You can add *Ingress* rules under *Network, Guest Network, IP Address, Firewall* setting page." msgstr "" #: ../../troubleshooting.rst:439 # 7a1ba3d03cd64a0cb60486d361453ebd -msgid "The VM Instances by default wont be able to access the Internet. Add Egress rules to permit traffic." +msgid "The VM Instances by default won't be able to access the Internet. Add Egress rules to permit traffic." 
msgstr "" #: ../../troubleshooting.rst:444 @@ -378,6 +378,6 @@ msgstr "" #: ../../troubleshooting.rst:454 # 5fff1dc7083a4412a9e4051f2e239180 -msgid "This section was contibuted by Shanker Balan and was originally published on `Shapeblue's blog `_" +msgid "This section was contributed by Shanker Balan and was originally published on `Shapeblue's blog `_" msgstr "" diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/hosts.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/hosts.po index 7b0786ac75..8b8abb5127 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/hosts.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/hosts.po @@ -317,7 +317,7 @@ msgstr "使用UI选项来移除主机。" # 400a182ceace4cef87ffe6c731ea45cd #: ../../hosts.rst:171 msgid "" -"Then you may power down the Host, re-use its IP address, re-install it, etc" +"Then you may power down the Host, reuse its IP address, re-install it, etc" msgstr "然后你可以关掉主机,重用它的IP地址,重新安装系统,等等。" # b9297a05564a41f8aa6995f8f1e2265a @@ -702,7 +702,7 @@ msgstr "选择你要操作的群集,点击编辑按钮。" #: ../../hosts.rst:410 msgid "" "Fill in your desired over-provisioning multipliers in the fields CPU " -"overcommit factor and RAM overcommit factor. The value which is intially shown" +"overcommit factor and RAM overcommit factor. The value which is initially shown" " in these fields is the default value inherited from the global " "configuration settings." msgstr "在CPU overcommit ratio和RAM overcommit ratio区域里填入你希望的超配系数。这里的初始值是从全局配置设置里继承而来的。" @@ -988,7 +988,7 @@ msgid "" "network from the VNET range of the physical network when the network turns " "to Implemented state. The VLAN is released back to the VNET pool when the " "network shuts down as a part of the Network Garbage Collection. The VLAN can" -" be re-used either by the same network when it is implemented again, or by " +" be reused either by the same network when it is implemented again, or by " "any other network. On each subsequent implementation of a network, a new " "VLAN can be assigned." 
msgstr "同样被支持—当网络转换为运行状态是,VLAN是随机地通过物理网络的VNET范围分配给网络。当网络作为网络垃圾回收过程的一部分而关闭时,VLAN会被回收到VNET池。当网络再次启用的时候VLAN还能被其重用,或者其他网络使用。在每个新启用的网络中,都有一个新的VLAN被分配。" diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/management.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/management.po index 585e04be42..6d55fdbe2f 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/management.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/management.po @@ -363,7 +363,7 @@ msgstr "Syslog警报详情" #: ../../management.rst:345 msgid "" "CloudStack generates a syslog message for every alert. Each syslog message " -"incudes the fields alertType, message, podId, dataCenterId, and clusterId, " +"includes the fields alertType, message, podId, dataCenterId, and clusterId, " "in the following format. If any field does not have a valid value, it will " "not be included." msgstr "CloudStack为每个警告生成一个syslog信息。每个syslog信息包含下列格式的字段alertType、message、podId、dataCenterId和clusterId。如果任何字段没有有效值的话,它将不会包含在内。" diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/inter_vlan_routing.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/inter_vlan_routing.po index c2517b7938..b784662c5f 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/inter_vlan_routing.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/inter_vlan_routing.po @@ -53,7 +53,7 @@ msgstr "主要的优势为:" #: ../../networking/inter_vlan_routing.rst:37 msgid "" "The administrator can deploy a set of VLANs and allow users to deploy VMs on" -" these VLANs. A guest VLAN is randomly alloted to an account from a pre-" +" these VLANs. A guest VLAN is randomly allotted to an account from a pre-" "specified set of guest VLANs. All the VMs of a certain tier of an account " "reside on the guest VLAN allotted to that account." msgstr "管理可以部署一个vlans集,同时运行用户部署虚拟机在这些vlan上。从预先指定的vlan集中随机的为租户分配一个来宾vlan.租户处于同一层的所有vm处于分配给这个租户的来宾vlan." 
diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/ip_reservation_in_guest_networks.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/ip_reservation_in_guest_networks.po index 2b2be7d4e8..d7e1080722 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/ip_reservation_in_guest_networks.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/ip_reservation_in_guest_networks.po @@ -93,7 +93,7 @@ msgstr "指定一个有效的客户虚拟机CIDR。只有不活动的IP在客户 # 0710411bb18c4764970dffcc49fe5dab #: ../../networking/ip_reservation_in_guest_networks.rst:60 msgid "" -"You cannot apply IP Reservation if any VM is alloted with an IP address that" +"You cannot apply IP Reservation if any VM is allotted with an IP address that" " is outside the Guest VM CIDR." msgstr "如果任一虚拟机被分配了客户虚拟机CIDR之外的IP地址时,IP预留将不能应用。" diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/virtual_private_cloud_config.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/virtual_private_cloud_config.po index c0efde4c74..553c3bf8c4 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/virtual_private_cloud_config.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking/virtual_private_cloud_config.po @@ -1820,7 +1820,7 @@ msgstr "使用用户或管理员登录到CloudStack用户界面。" # f974a014675745ceae4ebb19113bc49e #: ../../networking/virtual_private_cloud_config.rst:959 #: ../../networking/virtual_private_cloud_config.rst:1165 -msgid "Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses." +msgid "Navigate to Service Offerings and choose Network OfferingPublic IP Addresses." 
msgstr "下拉选择方案,选择网络方案:" # 08107e25d3ae4ed5a4e72a9ef68249af diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking2.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking2.po index 2ec73d92ee..d5fb26845f 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking2.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking2.po @@ -1046,7 +1046,7 @@ msgstr "指定一个有效的客户虚拟机CIDR。只有不活动的IP在客户 # 7cd262eb2864435f953ba2f8d9b2e0a0 #: ../../networking2.rst:637 msgid "" -"You cannot apply IP Reservation if any VM is alloted with an IP address that" +"You cannot apply IP Reservation if any VM is allotted with an IP address that" " is outside the Guest VM CIDR." msgstr "如果任一虚拟机被分配了客户虚拟机CIDR之外的IP地址时,IP预留将不能应用。" @@ -6338,7 +6338,7 @@ msgstr "主要的优势为:" #: ../../networking2.rst:4764 msgid "" "The administrator can deploy a set of VLANs and allow users to deploy VMs on" -" these VLANs. A guest VLAN is randomly alloted to an account from a pre-" +" these VLANs. A guest VLAN is randomly allotted to an account from a pre-" "specified set of guest VLANs. All the VMs of a certain tier of an account " "reside on the guest VLAN allotted to that account." msgstr "管理可以部署一个vlans集,同时运行用户部署虚拟机在这些vlan上。从预先指定的vlan集中随机的为租户分配一个来宾vlan.租户处于同一层的所有vm处于分配给这个租户的来宾vlan." @@ -7792,7 +7792,7 @@ msgstr "使用用户或管理员登录到CloudStack用户界面。" # bfaeb8a949fb4d25a618540dcc365471 # 6c6e4ac1110442ba9ec325328e96bfb8 #: ../../networking2.rst:6177 ../../networking2.rst:6482 -msgid "Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses." +msgid "Navigate to Service Offerings and choose Network OfferingPublic IP Addresses." 
msgstr "下拉选择方案,选择网络方案:" # 7d4dc49f6e224caa9bee24da2b622a4c diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking_and_traffic.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking_and_traffic.po index ec94b4aa4e..ae047772c5 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/networking_and_traffic.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/networking_and_traffic.po @@ -1127,7 +1127,7 @@ msgstr "指定一个有效的客户虚拟机CIDR。只有不活动的IP在客户 # 0e5b7bff020d494b9f4e85c641380036 #: ../../networking/ip_reservation_in_guest_networks.rst:60 msgid "" -"You cannot apply IP Reservation if any VM is alloted with an IP address that" +"You cannot apply IP Reservation if any VM is allotted with an IP address that" " is outside the Guest VM CIDR." msgstr "如果任一虚拟机被分配了客户虚拟机CIDR之外的IP地址时,IP预留将不能应用。" @@ -6544,7 +6544,7 @@ msgstr "主要的优势为:" #: ../../networking/inter_vlan_routing.rst:37 msgid "" "The administrator can deploy a set of VLANs and allow users to deploy VMs on" -" these VLANs. A guest VLAN is randomly alloted to an account from a pre-" +" these VLANs. A guest VLAN is randomly allotted to an account from a pre-" "specified set of guest VLANs. All the VMs of a certain tier of an account " "reside on the guest VLAN allotted to that account." msgstr "管理可以部署一个vlans集,同时运行用户部署虚拟机在这些vlan上。从预先指定的vlan集中随机的为租户分配一个来宾vlan.租户处于同一层的所有vm处于分配给这个租户的来宾vlan." @@ -8012,7 +8012,7 @@ msgstr "使用用户或管理员登录到CloudStack用户界面。" # 19877c93762c4d95b38bfafc90fc110c #: ../../networking/virtual_private_cloud_config.rst:959 #: ../../networking/virtual_private_cloud_config.rst:1165 -msgid "Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses." +msgid "Navigate to Service Offerings and choose Network OfferingPublic IP Addresses." 
msgstr "下拉选择方案,选择网络方案:" # 34b1dc57da234cfcbef32cbb10126c3c diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/service_offerings.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/service_offerings.po index b4f468837e..70fa95b6fb 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/service_offerings.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/service_offerings.po @@ -567,7 +567,7 @@ msgstr "QoS 类型。三种可选:空 ( 无服务质量), hypervisor msgid "" "Custom IOPS. If checked, the user can set their own IOPS. If not checked, " "the root administrator can define values. If the root admin does not set " -"values when using storage QoS, default values are used (the defauls can be " +"values when using storage QoS, default values are used (the defaults can be " "overridden if the proper parameters are passed into CloudStack when creating" " the primary storage in question)." msgstr "订制 IOPS 。 如选中,用户可以设置自己的 IOPS。如未被选中,root 管理员则能够定义该值。如果使用存储 QoS时,root 管理员没有设置该值,则采用默认值(如果创建主存储时考虑到对应的参数被传递到 CloudStack 中,则默认值将被覆盖)" diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/systemvm.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/systemvm.po index c880c7e0d9..eb13e92bb2 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/systemvm.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/systemvm.po @@ -125,9 +125,9 @@ msgstr "XenServer" # 6ea0f846b0a34711b7c0090cb48d8c32 #: ../../systemvm.rst:66 msgid "" -"http://download.cloud.com/templates/4.2/64bit/systemvmtemplate64-2013-07-15" +"http://download.cloudstack.org/templates/4.2/64bit/systemvmtemplate64-2013-07-15" "-master-xen.vhd.bz2" -msgstr "http://download.cloud.com/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-xen.vhd.bz2" +msgstr "http://download.cloudstack.org/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-xen.vhd.bz2" # e8a51da6f6614114a91e4006eedfc912 #: ../../systemvm.rst:67 @@ -137,9 +137,9 @@ msgstr "KVM" # fe251bca2b854129890cba8e7ac9bbbf #: ../../systemvm.rst:67 msgid "" 
-"http://download.cloud.com/templates/4.2/64bit/systemvmtemplate64-2013-07-15" +"http://download.cloudstack.org/templates/4.2/64bit/systemvmtemplate64-2013-07-15" "-master-kvm.qcow2.bz2" -msgstr "http://download.cloud.com/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-kvm.qcow2.bz2" +msgstr "http://download.cloudstack.org/templates/4.2/64bit/systemvmtemplate64-2013-07-15-master-kvm.qcow2.bz2" # 2d498240d20c4683ab11ac2232135a16 #: ../../systemvm.rst:70 diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/templates.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/templates.po index 75af44dab5..d6f2ef47e2 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/templates.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/templates.po @@ -1487,10 +1487,10 @@ msgstr "下载cloud-set-guest-password脚本文件:" # 7dfc38f9a99c4d0bbb5007329350693e #: ../../templates.rst:1039 msgid "" -"`http://download.cloud.com/templates/4.2/bindir/cloud-set-guest-password.in " -"`_" -msgstr "`http://download.cloud.com/templates/4.2/bindir/cloud-set-guest-password.in `_" +msgstr "`http://download.cloudstack.org/templates/4.2/bindir/cloud-set-guest-password.in `_" # 6eead5e6602446c78f65d37e092b049f #: ../../templates.rst:1042 diff --git a/source/adminguide/locale/zh_CN/LC_MESSAGES/troubleshooting.po b/source/adminguide/locale/zh_CN/LC_MESSAGES/troubleshooting.po index b042c29a71..4a8e5409f2 100644 --- a/source/adminguide/locale/zh_CN/LC_MESSAGES/troubleshooting.po +++ b/source/adminguide/locale/zh_CN/LC_MESSAGES/troubleshooting.po @@ -338,7 +338,7 @@ msgstr "故障排查网络传输" # 5f383b9190f34ebcbdd6bb92b713ee21 #: ../../troubleshooting.rst:244 msgid "" -"Below are a few troubleshooting steps to check whats going wrong with your " +"Below are a few troubleshooting steps to check what's going wrong with your " "network..." msgstr "在下列故障排查步骤中检验你网络中出现的故障..." 
@@ -370,7 +370,7 @@ msgstr "在*host2 (kvm2)*上" msgid "" "If the pings dont work, run *tcpdump(8)* all over the place to check who is " "gobbling up the packets. Ultimately, if the switches are not configured " -"correctly, CloudStack networking wont work so fix the physical networking " +"correctly, CloudStack networking won't work so fix the physical networking " "issues before you proceed to the next steps" msgstr "如果ping不通,运行 *tcpdump(8)*在所有VLAN上检查丢失的数据包。最终,如果交换机配置失败,CloudStack网络将无法工作,所以在处理下一部前要确定物理网络设备的问题。" @@ -407,7 +407,7 @@ msgstr "列出正在使用的*CloudMonkey*" msgid "" "KVM traffic labels require to be named as *\"cloudbr0\"*, *\"cloudbr2\"*, " "*\"cloudbrN\"* etc and the corresponding bridge must exist on the KVM hosts." -" If you create labels/bridges with any other names, CloudStack (atleast " +" If you create labels/bridges with any other names, CloudStack (at least " "earlier versions did) seems to ignore them. CloudStack does not create the " "physical bridges on the KVM hosts, you need to create them **before** before" " adding the host to Cloudstack." @@ -440,7 +440,7 @@ msgid "" "The Internet would be accessible from both the SSVM and CPVM instances by " "default. Their public IPs will also be directly pingable from the Internet. " "Please note that these test would work only if your switches and traffic " -"labels are configured correctly for your environment. If your SSVM/CPVM cant" +"labels are configured correctly for your environment. If your SSVM/CPVM can't" " reach the Internet, its very unlikely that the Virtual Router (VR) can also" " the reach the Internet suggesting that its either a switching issue or " "incorrectly assigned traffic labels. 
Fix the SSVM/CPVM issues before you " @@ -458,16 +458,16 @@ msgstr "除非有些Egress规则,Virtual Router(VR)也是不能到达Internet # fd961e75e43d4c48a4b779ef136e1d12 #: ../../troubleshooting.rst:432 msgid "" -"However, the Virtual Router's (VR) Source NAT Public IP address **WONT** be " +"However, the Virtual Router's (VR) Source NAT Public IP address **WON'T** be " "reachable until appropriate Ingress rules are in place. You can add " "*Ingress* rules under *Network, Guest Network, IP Address, Firewall* setting" " page." -msgstr "尽管如此,Virtual Router(VR) Source NAT Pulic IP地址除非有近似的Ingress规则在此,要么**WONT** 达到。你可以添加 *Ingress* rules under *Network, Guest Network, IP Address, Firewall* 设置页。" +msgstr "尽管如此,Virtual Router(VR) Source NAT Pulic IP地址除非有近似的Ingress规则在此,要么**WON'T** 达到。你可以添加 *Ingress* rules under *Network, Guest Network, IP Address, Firewall* 设置页。" # 7a1ba3d03cd64a0cb60486d361453ebd #: ../../troubleshooting.rst:439 msgid "" -"The VM Instances by default wont be able to access the Internet. Add Egress " +"The VM Instances by default won't be able to access the Internet. Add Egress " "rules to permit traffic." msgstr "默认的VM Instances不能够连接Internet。添加Egress规则后可允许连接。" @@ -491,7 +491,7 @@ msgstr "在海量的实例中,问题会出现在交换层,原因是L3的配 # 5fff1dc7083a4412a9e4051f2e239180 #: ../../troubleshooting.rst:454 msgid "" -"This section was contibuted by Shanker Balan and was originally published on" +"This section was contributed by Shanker Balan and was originally published on" " `Shapeblue's blog `_" msgstr "这些内容有Shanker Balan贡献,其原文发布在`Shapeblue'博客中`_" diff --git a/source/adminguide/management.rst b/source/adminguide/management.rst index 78d49176e6..271faedd47 100644 --- a/source/adminguide/management.rst +++ b/source/adminguide/management.rst @@ -371,7 +371,7 @@ Syslog Alert Details ^^^^^^^^^^^^^^^^^^^^ CloudStack generates a syslog message for every alert. 
Each syslog -message incudes the fields alertType, message, podId, dataCenterId, and +message includes the fields alertType, message, podId, dataCenterId, and clusterId, in the following format. If any field does not have a valid value, it will not be included. @@ -517,6 +517,52 @@ rules. the global configuration. +Managing log files +------------------ + +The log files are located in `/var/log/cloudstack`. This directory has the +following subdirectories: + +- `management` for the Management Server +- `usage` for the Usage Server +- `agent` for the Agent for KVM hosts + +CloudStack uses log4j2 to manage log files. The log4j2 configuration file +is located in the corresponding subdirectories in the `/etc/cloudstack/` +directory and is named `log4j-cloud.xml`. + +By default, cloudstack uses `TimeBasedTriggeringPolicy` which rolls over +the log file every day and are kept indefinitely. The log files are +compressed and archived in the same directory. + +Over time, the logs can fill up the entire disk space. To avoid this, you can +update the log4j-cloud.xml file to change the log file rollover and retention +policy. You can change the rollover policy to `SizeBasedTriggeringPolicy` +and set the maximum size of the log file. You can also set the maximum number +of archived log files to keep. + +For example, to change the rollover policy for `management-server.log` to +`SizeBasedTriggeringPolicy` and set the maximum size of the log file to +100MB and keep the maximum of 15 archived log files, you can update the +`log4j-cloud.xml` file as follows: + +.. code-block:: diff + + - + + + + + + + - + + + + + + + +You can also checkout some configuration recipes from the log4j2 documentation +`here `_. 
+ Stopping and Restarting the Management Server --------------------------------------------------- @@ -541,3 +587,53 @@ To start the Management Server: # service cloudstack-management start + +Management Server Statistics and Peers +-------------------------------------- + +Administrators are able to view the statistics and peers information of management server. + +#. Log in to the CloudStack UI as administrator + +#. In the left navigation bar, click Infrastructure. + +#. Click "Management servers", all management servers are listed. + +|management-servers-list.png| + +#. Click the management server you'd like to view. The statistics of the management server are displayed. + +|management-server-statistics.png| + +#. Navigate to the "Peers" tab. The peers of the management servers are listed + +|management-server-peers.png| + + +Global settings for management servers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. cssclass:: table-striped table-bordered table-hover + +======================================= ======================== +Configuration Description +======================================= ======================== +management.server.stats.interval Time interval in seconds, for management servers stats collection. Set to <= 0 to disable management servers stats. Default value is 60 +cluster.heartbeat.interval Interval (in milliseconds) to check for the heart beat between management server nodes. Default value is 1500 +cluster.heartbeat.threshold Threshold (in milliseconds) before self-fence the management server. The threshold should be larger than management.server.stats.interval. Default value is 150000 +======================================= ======================== + +.. note:: + - Every 60 seconds (configurable via management.server.stats.interval setting) each management server collects its statistics and publishes to all other management server peers. 
When other management server receives the published stats, it will set the peer state (owner is the receiver and peer is the sender) to Up. + - Every 1.5 seconds (configurable via cluster.heartbeat.interval), each management server writes heartbeat to CloudStack database, and check the stats of other management servers. + - If in the past 150 seconds (configurable via cluster.heartbeat.threshold), a management server does not write heartbeat and its peer states, its state and peer states will be set to Down by other management servers. + - In case a management server cannot write heartbeat to the database due to connection issue to the database, the host is set to Down state by other management server, when the database connection is restored, the management server will perform self-fencing and exit with code 219. + +.. |management-servers-list.png| image:: /_static/images/management-servers-list.png + :alt: List of management servers + +.. |management-server-statistics.png| image:: /_static/images/management-server-statistics.png + :alt: Details of management server + +.. |management-server-peers.png| image:: /_static/images/management-server-peers.png + :alt: List of management server peers diff --git a/source/adminguide/nas_plugin.rst b/source/adminguide/nas_plugin.rst new file mode 100644 index 0000000000..bbf3038eb6 --- /dev/null +++ b/source/adminguide/nas_plugin.rst @@ -0,0 +1,116 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information# + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. _NAS Backup and Recovery Plugin: + +NAS Backup and Recovery Plugin +============================== + +About the NAS Backup and Recovery Plugin +---------------------------------------- + +The NAS Backup and Recovery Plugin provider simple B&R operations for KVM +instances to any shared storage (NAS). It is based on `libvirt push backup mode +`_ +to take full instance backups (qcow2) and requires libvirt-7.2.0 and QEMU-4.2, +or high versions on the KVM hosts. + +The NAS B&R plugin requires admin to first add backup repositories which are +network-attached storage (shared storage). Currently it supports NFS, and may +support other shared storage such as CephFS and CIFS/Samba in future. + +When initiating B&R operations on KVM instance, the assigned backup offering +is used to infer backup repository (NAS) details which are then used to mount +the shared storage temporarily on the KVM host to perform instance backup/restore +disks operations. This also requires that admin installs NAS-storage specific +utilities on the KVM hosts such as nfs-utils/nfs-common (ceph-common, cifs-utils). + +Consider the following mount, typically performed on a KVM/Linux host to mount storage: + + mount -t -o
+ +Some examples for variety of shared storage can be: + + mount -t nfs 10.10.1.10:/export /target -o vers=4.2,defaults + + mount -t ceph 10.10.1.10,10.10.1.11,10.10.1.12:/ /target -o name=user,secret=xyz,defaults + +The backup repository is designed to accept these parameters (type, address and +mount options) as configurations to be used to execute mount operations such as +illustrated above. + +With 'nas' B&R plugin enabled, after a backup repositories are added, root +admins can create new backup offerings by selecting the zone and the backup +repository. These backup offerings are then assigned and used with KVM instances +to perform support B&R actions and operations. + +Using the NAS Backup and Recovery Plugin +---------------------------------------- +To use the NAS Backup and Recovery Plugin, the Backup and Recovery framework needs to be enabled first. Then the backup plugin 'nas' needs to be enabled on either the global or zone settings. + +================================= ======================== +Configuration Value +================================= ======================== +backup.framework.enabled true +backup.framework.provider.plugin nas +================================= ======================== + +Once the above two configurations are set, restart the cloudstack-management service. Once the service is restarted we can add the backup repository for the 'nas' Backup and Recovery plugin. +Navigate to the configuration -> Backup Repository. Click on 'Add Backup Repository' and fill the form. + +=================== ======================== +Field Value +=================== ======================== +Name A suitable name to represent the Backup Repository +Address URL, in case of NFS :/path +Type NFS ( only NFS type in 4.20) +label.mountopts Any mount point options to be passed while mouting this storage on the hypervisor. +Zone The zone in CloudStack with which this Backup Repository must be associated. 
+=================== ======================== + +.. image:: /_static/images/B&R-Backup-Respository.png + :align: center + :alt: NAS Backup repository + +Once the Backup Repository is created, we need to add a Backup Offering, in this plugin the Backup offering is a placeholder to associate an instance to a Backup Repository. While creating the Backup Offering, select the desired Backup Repository. Associate the Backup Offering on an instance to create an Adhoc or scheduled backup. + +Support Information and Limitation +---------------------------------- + +The NAS B&R plugin has been tested with EL8, EL9, Ubuntu 22.04 and 24.04. Older +KVM distros such as EL7, Ubuntu 20.04 etc may not work due to libvirt/qemu +version requirements. Other supported KVM-distros are not tested but may work +such as OpenSUSE 15, Debian 11 and Debian 12. + +Instance backups are full disk backups and limited by libvirt's ability to +initiate and handle backup. All such backups are exported and stored in qcow2 +format. Due to this, restore operation are supported for volumes of type qcow2 +and limited to NFS and local storage based primary storage pools. + +For running instances, their disks (of any format/storage type) are backed up by +libvirtd's push based efficient-backup mechanism exported as qcow2 disks on the +backup repository. + +For stopped instances, `qemu-img` is used to convert and export full-disk backup +in qcow2 format to the backup repository. + +For restore operations, the KVM instance must be stopped in CloudStack. +Currently, only volume(s) restoration is supported only to NFS and local storage +based primary storage pools, and restored volumes are fully baked disks (i.e. +not using any backing template file). + +Restoring fully expunged and unmanaged instances are not supported. Backup and +restore operations are not fully supported for CKS cluster instances and should +be avoided. 
diff --git a/source/adminguide/networker_plugin.rst b/source/adminguide/networker_plugin.rst index c3b2ba7319..9864d78f35 100644 --- a/source/adminguide/networker_plugin.rst +++ b/source/adminguide/networker_plugin.rst @@ -38,7 +38,7 @@ KVM Hypervisor(s) #. A BASH shell at minimum version 4.4.19 #. DELL EMC Networker client must be installed and in running state #. Hypervisor must be associated with the DELL EMC Networker server as CLIENT -#. DELL EMC Networker can connect and verify certificates to the Hyper-v Client +#. DELL EMC Networker can connect and verify certificates to the KVM Client #. A Hypervisor must be in UP and ENABLED state and resource state respectively in order to be able to get backups for the Instances running. #. A proper timezone set. Identical to the EMC Networker Server and Management server @@ -68,7 +68,7 @@ General Concepts placeholder for being able to backup and restore your Instances from all hosts within the cluster. #. Cross cluster restores are indirectly supported by restoring to the original cluster and then migrating the Virtual Machine to the destination cluster. -#. Any manual KVM backup you initiate (from the hyper-v command line) will be registered in Cloudstack automatically. +#. Any manual KVM backup you initiate (from the command line) will be registered in Cloudstack automatically. You need to use the client scripts and pass the proper parameters to do so. #. Any backup you expire/remove from the DELL EMC Networker side will be unregistered in Cloudstack automatically. diff --git a/source/adminguide/networking.rst b/source/adminguide/networking.rst index d68e60eeec..df18c7547d 100644 --- a/source/adminguide/networking.rst +++ b/source/adminguide/networking.rst @@ -288,6 +288,16 @@ To create a network offering: For more information on VPCs, see `“About Virtual Private Clouds” `_. + - **Network mode**. This option indicates the mode with which the network will operate. + Valid options are NATTED (default) or ROUTED. 
This applies on isolated networks only. + For more information on Network mode, see `“About Network + Mode” `_. + + - **Routing mode**. This option indicates the routing mode for the network offering. + Supported types are: Static or Dynamic. + For more information on Routing mode, see `“About Routing + Mode” `_. + - **Promiscuous Mode**. Applicable for guest networks on VMware hypervisor only. It accepts the following values for desired behaviour of the network elements: *Reject* - The switch drops any outbound frame from an Instance adapter with a source MAC address that is different from the one in the .vmx configuration file. diff --git a/source/adminguide/networking/advanced_zone_config.rst b/source/adminguide/networking/advanced_zone_config.rst index 3d8299c819..e2863df4bc 100644 --- a/source/adminguide/networking/advanced_zone_config.rst +++ b/source/adminguide/networking/advanced_zone_config.rst @@ -22,6 +22,8 @@ Within a zone that uses advanced networking, you need to tell the Management Server how the physical Network is set up to carry different kinds of traffic in isolation. +You can configure a zone with multiple Physical Networks having guest traffic type. In such zones, we need to tag the additional Physical networks. We must have one Physical Network that is not tagged for isolated/L2 network offerings not configured with any tags. For example the default network offerings. + Configure Guest Traffic in an Advanced Zone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -64,11 +66,13 @@ configure the base guest Network: - **Netmask**: The netmask in use on the subnet the Guest Instances will use. + - **CIDR Size**: The cidrsize of the subnet the Guest Instances will use. Available only when the selected Network offering supports ROUTED mode. + - **DNS**: A set of custom DNS that will be used by the guest Network. If not provided then DNS specified for the zone will be used. Available only when the selected Network offering supports DNS service. 
- **IPv6 DNS**: A set of custom IPv6 DNS that will be used by the guest Network. If not provided then IPv6 DNS specified for the zone will be used. Available only when the selected Network offering is IPv6 enabled and supports DNS service. - - **IPv4 address for the VR in this Network**: The source NAT address or primary public Network address to use by the guest Network. If not provided then a random address from the available pool of addresses wil be used. + - **IPv4 address for the VR in this Network**: The source NAT address or primary public Network address to use by the guest Network. If not provided then a random address from the available pool of addresses will be used. - **Network Domain**: A custom DNS suffix at the level of a Network. If you want to assign a special domain name to the Guest Instance Network, specify a diff --git a/source/adminguide/networking/dns_and_dhcp.rst b/source/adminguide/networking/dns_and_dhcp.rst index c84cffabe7..928c151607 100644 --- a/source/adminguide/networking/dns_and_dhcp.rst +++ b/source/adminguide/networking/dns_and_dhcp.rst @@ -17,6 +17,13 @@ DNS and DHCP ------------ -The Virtual Router provides DNS and DHCP services to the guests. It -proxies DNS requests to the DNS server configured on the Availability -Zone. +The Virtual Router & ConfigDrive (since Apache CloudStack 4.20) provides +DNS and DHCP services to the guests. It proxies DNS requests to the DNS +server configured on the Availability Zone. + +.. note:: + In case of a network with ConfigDrive, adding/removing nic to/from an + instance or updating the ip address of a nic will not be reflected in the + instance if the instance is already running. To do so, run + `cloud-init clean --machine-id -s` to clean the machine id and seed data. + Then reboot the instance to apply the changes. 
\ No newline at end of file diff --git a/source/adminguide/networking/dynamic_static_routing.rst b/source/adminguide/networking/dynamic_static_routing.rst new file mode 100644 index 0000000000..f86497ed4b --- /dev/null +++ b/source/adminguide/networking/dynamic_static_routing.rst @@ -0,0 +1,290 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information# + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +Dynamic and Static Routing +----------------------------- + +For VMs on Isolated networks, the IP of VMs are not publicly accessible. +To access the VMs from the Internet, users need to create Load balancing rules, +Port Forwarding rules, enable Static NAT, or enable VPN. + +The IPv6 static routing feature has been introduced in Apache CloudStack 4.17.0.0, so that +users are able to access the IPv6 address of guest VMs on Isolated networks from the Internet or public network. +For more information, see `“IPv6 support for isolated networks and VPC Network Tiers” <../plugins/ipv6.html#isolated-network-and-vpc-network-tier>`_. + +From Apache CloudStack 4.20.0.0, users are able to create isolated networks and VPCs with ROUTED mode. 
+ +- Manage IPv4 subnets for Zones (ROOT admin/operator only) +- Create Networks with Static Routing for IPv4 +- Manage IPv4 Routing Firewall for Networks +- Manage AS number and BGP peers for Dynamic Routing (ROOT admin only) +- Create Networks with Dynamic Routing for IPv4 and IPv6 + + +About Network Mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Network mode indicates the mode with which the isolated network or VPC will operate. +There are two valid options: + +- NATTED. This is the default network mode of isolated networks. The VR of isolated networks and VPCs provides Source NAT services, as well as Static NAT, Load Balancer, Port Forwarding, Vpn if the network offering supports. +- ROUTED. For isolated networks in ROUTED mode, the VR no longer supports Source NAT, Static NAT, Load Balancer, Port Forwarding and Vpn. The supported services are Dns, Dhcp, Userdata, Firewall (for isolated networks) and Network ACL (for vpc and vpc networks). + + +About Routing mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Routing mode indicates how routing will operate with the isolated networks with ROUTED network mode. +There are two valid options: + +- Static. The operators need to add the static routes to the isolated networks or VPCs in the upstream router manually. +- Dynamic. The AS number will be automatically allocated, and BGP peer sessions will be set up automatically in the VR of the isolated networks or VPCs. The operators need to add the AS number ranges and BGP peers for each zone before creating network with Dynamic routing mode. + + +Manage IPv4 Subnets for Zone +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Like IPv6 prefixes, operators need to configure the IPv4 subnets for zone, which will be eventually used by guest networks.
+ +Supported CloudStack APIs for operators to manage the IPv4 subnets for zone are: + +- **createIpv4SubnetForZone** : create an IPv4 subnet for zone +- **dedicateIpv4SubnetForZone** : dedicate an IPv4 subnet for zone to a domain or an account +- **deleteIpv4SubnetForZone** : delete an IPv4 subnet for zone +- **listIpv4SubnetsForZone** : list IPv4 subnets for zone +- **releaseIpv4SubnetForZone** : release a dedicated IPv4 subnet for zone from a domain or an account +- **updateIpv4SubnetForZone** : update an IPv4 subnet for zone + +Operators (root admins) can manage the IPv4 subnets for zone by navigating to Infrastructure -> Zones -> IPv4 Subnets +|manage-ipv4-subnets-for-zone.png| + + +Manage IPv4 Subnets for Guest Networks +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Unlike IPv6 (each isolated network with IPv6 support gets a /64 IPv6 network), operators need to manage IPv4 subnets for guest networks. +An IPv4 subnet for guest networks is created from its parent which is an IPv4 subnet for zone. + +There are some global settings which can be set for each account. See below: + +.. cssclass:: table-striped table-bordered table-hover + +================================================= ======================== +Configuration Description +================================================= ======================== +routed.ipv4.network.cidr.auto.allocation.enabled Whether the auto-allocation of network CIDR for routed network is enabled or not. True by default.
+routed.ipv4.network.max.cidr.size The maximum value of the cidr size for isolated networks in ROUTED mode +routed.ipv4.network.min.cidr.size The minimum value of the cidr size for isolated networks in ROUTED mode +routed.ipv4.vpc.max.cidr.size The maximum value of the cidr size for VPC in ROUTED mode +routed.ipv4.vpc.min.cidr.size The minimum value of the cidr size for VPC in ROUTED mode +================================================= ======================== + +Supported CloudStack APIs for operators to manage the IPv4 subnets for guest networks are: + +- **createIpv4SubnetForGuestNetwork** : create an IPv4 subnet for guest networks +- **deleteIpv4SubnetForGuestNetwork** : delete an IPv4 subnet for guest networks +- **listIpv4SubnetsForGuestNetwork** : list IPv4 subnets for guest networks + +Operators (root admins) can manage the IPv4 subnet by navigating to Network -> IPv4 Subnets +|manage-ipv4-subnets-for-networks.png| + + +Create Network and VPC Offering with ROUTED mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create network offering with ROUTED mode, see `“Creating a New Network Offering” <networking.html#creating-a-new-network-offering>`_.
+ +|routed-add-network-offering.png| + +To create VPC offering with ROUTED mode, see below + +|routed-add-vpc-offering.png| + + +Create Network with Static Routing for IPv4 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create a network with static routing, users need to navigate to Network -> Add Network -> Isolated, and + +- Choose a network offering with ROUTED mode and routing mode is Static +- Specify the gateway and netmask (available for ROOT admin only) +- OR, specify the cidrsize (available for all users) + +|routed-add-network-cidrsize.png| + +If cidrsize is specified, CloudStack will allocate an IPv4 subnet for guest network to the new network + +- Check if there is an IPv4 subnet with same CIDR size available, +- If not, and setting "routed.ipv4.network.cidr.auto.allocation.enabled" is true for account, allocate an IPv4 subnet for the new network, from the IPv4 subnet for zone which the account can access. +- Otherwise, the network creation fails. + +When the network is implemented, the IPv4 routes are displayed in the network details page. + +|routed-ipv4-routes.png| + +.. note:: + For networks or VPCs with IPv4 static routing, the administrator needs to add upstream IPv4 routes once a network or VPC is successfully deployed. + + +Create Network with Static Routing for IPv6 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The IPv6 static routing has been introduced in Apache CloudStack 4.17.0.0. +For more information, see `“IPv6 support for isolated networks and VPC Network Tiers” <../plugins/ipv6.html#isolated-network-and-vpc-network-tier>`_. + +Users can create network with static routing for both IPv4 and IPv6, if the network offering supports DualStack.
+ + +Manage IPv4 Routing Firewall +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Users can manage the IPv4 Routing firewalls by navigating to Network -> Guest Networks -> choose a network -> IPv4 Routing Firewall + +|routed-ipv4-routing-firewall.png| + +Supported CloudStack APIs for operators to manage the IPv4 Routing firewall rules are: + +- **createRoutingFirewallRule** : create an IPv4 routing firewall rule +- **updateRoutingFirewallRule** : update an IPv4 routing firewall rule +- **deleteRoutingFirewallRule** : delete an IPv4 routing firewall rule +- **listRoutingFirewallRules** : list IPv4 routing firewall rules + + +Manage AS number for Dynamic Routing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create network with dynamic routing, operators must add AS number ranges in advance by navigating to Infrastructure -> Zones -> choose a zone -> AS Number. + +|dynamic-routing-as-number-ranges.png| + +Supported CloudStack APIs for operators to manage the AS number ranges and AS numbers are: + +- **createASNRange** : Creates a range of Autonomous Systems for BGP Dynamic Routing +- **listASNRanges** : List Autonomous Systems Number Ranges +- **deleteASNRange** : deletes a range of Autonomous Systems for BGP Dynamic Routing +- **listASNumbers** : List Autonomous Systems Numbers +- **releaseASNumber** : Releases an AS Number back to the pool + +Operators can list the AS numbers by navigating to Network -> AS Numbers + +|dynamic-routing-as-numbers.png| + + +Manage BGP peers for Dynamic Routing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To create network with dynamic routing, operators must add BGP peers in advance. Guest networks with Dynamic Routing will connect to all BGP peers the account can access.
+ +|dynamic-routing-bgp-peers.png| + +Supported CloudStack APIs for operators to manage the BGP peers are: + +- **createBgpPeer** : create a BGP peer +- **dedicateBgpPeer** : dedicate a BGP peer to a domain or an account +- **deleteBgpPeer** : delete a BGP peer +- **listBgpPeers** : list BGP peers +- **releaseBgpPeer** : release a dedicated BGP peer from a domain or an account +- **updateBgpPeer** : update a BGP peer + + +Create Network with Dynamic Routing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The steps to create a network with dynamic routing are almost the same as for a network with static routing. The only difference is that users need to choose a network offering whose routing mode is Dynamic. + +During the network creation, CloudStack will + +- Allocate an AS number to the network +- If the network owner does not have dedicated BGP peers, or account setting "use.system.bgp.peers" is set to true, configure BGP sessions in the network VR to connect to all BGP peers the network owner can access. +- If the network owner has dedicated BGP peers, and account setting "use.system.bgp.peers" is set to false, configure BGP sessions in the network VR to connect to all dedicated BGP peers of the domain and the network owner. + +ROOT admin can change BGP peers of an existing network with Dynamic routing. After that, the network VR will only connect to selected BGP peers. + +|dynamic-routing-change-network-bgp-peers.png| + + +Create VPC with Dynamic Routing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The creation of a VPC with Dynamic routing is almost the same as a VPC with static routing. CloudStack will allocate an AS number to the VPC, and +- If the VPC owner does not have dedicated BGP peers, or account setting "use.system.bgp.peers" is set to true, configure BGP sessions in the VPC VR to connect to all BGP peers the VPC owner can access.
+- If the VPC owner has dedicated BGP peers, and account setting "use.system.bgp.peers" is set to false, configure BGP sessions in the VPC VR to connect to all dedicated BGP peers of the domain and the VPC owner. + +ROOT admin can change BGP peers of an existing VPC with Dynamic routing. After that, the VPC VR will only connect to selected BGP peers. + +|dynamic-routing-change-vpc-bgp-peers.png| + +.. note:: + If a BGP peer is added, removed or updated, the existing network VRs and VPC VRs will not be automatically reconfigured. Please restart the network or VPC to reconfigure the VRs. + + +CloudStack Kubernetes Service support on ROUTED networks and VPCs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To support CloudStack Kubernetes Service on ROUTED networks and VPCs, operators have to configure the networks. + +- The management server must be able to connect to the VMs on ROUTED networks or VPCs +- Some routing firewall Ingress rules (for ROUTED networks) or Network ACL Ingress rules (for ROUTED VPCs) must be configured to open the following ports. + +.. cssclass:: table-striped table-bordered table-hover + +================= ======================== +Ports Description +================= ======================== +22 The management server configures the CKS nodes via port 22. +6443 The port of Kubernetes API server. +8080 The port of Kubernetes Dashboard. +================= ======================== + +For more information, see `“CloudStack Kubernetes Service” <../plugins/cloudstack-kubernetes-service.html>`_. + + +.. |manage-ipv4-subnets-for-zone.png| image:: /_static/images/manage-ipv4-subnets-for-zone.png + :alt: Manage IPv4 subnets for zone + +.. |manage-ipv4-subnets-for-networks.png| image:: /_static/images/manage-ipv4-subnets-for-networks.png + :alt: Manage IPv4 subnets for guest networks + +.. |routed-add-network-offering.png| image:: /_static/images/routed-add-network-offering.png + :alt: Add network offering with ROUTED mode + +..
|routed-add-vpc-offering.png| image:: /_static/images/routed-add-vpc-offering.png + :alt: Add vpc offering with ROUTED mode + +.. |routed-add-network-cidrsize.png| image:: /_static/images/routed-add-network-cidrsize.png + :alt: Add ROUTED network with specified cidr size + +.. |routed-ipv4-routes.png| image:: /_static/images/routed-ipv4-routes.png + :alt: IPv4 static routes + +.. |routed-ipv4-routing-firewall.png| image:: /_static/images/routed-ipv4-routing-firewall.png + :alt: IPv4 routing firewall rules + +.. |dynamic-routing-as-number-ranges.png| image:: /_static/images/dynamic-routing-as-number-ranges.png + :alt: AS number ranges for Dynamic Routing + +.. |dynamic-routing-as-numbers.png| image:: /_static/images/dynamic-routing-as-numbers.png + :alt: AS numbers for Dynamic Routing + +.. |dynamic-routing-bgp-peers.png| image:: /_static/images/dynamic-routing-bgp-peers.png + :alt: BGP peers for Dynamic Routing + +.. |dynamic-routing-change-network-bgp-peers.png| image:: /_static/images/dynamic-routing-change-network-bgp-peers.png + :alt: Change BGP peers for network with Dynamic Routing + +.. |dynamic-routing-change-vpc-bgp-peers.png| image:: /_static/images/dynamic-routing-change-vpc-bgp-peers.png + :alt: Change BGP peers for VPC with Dynamic Routing + diff --git a/source/adminguide/networking/ip_reservation_in_guest_networks.rst b/source/adminguide/networking/ip_reservation_in_guest_networks.rst index 32d4aff035..f32e0c1f08 100644 --- a/source/adminguide/networking/ip_reservation_in_guest_networks.rst +++ b/source/adminguide/networking/ip_reservation_in_guest_networks.rst @@ -57,7 +57,7 @@ machines: - Specify a valid Guest instance CIDR. IP Reservation is applied only if no active IPs exist outside the Guest instance CIDR. - You cannot apply IP Reservation if any instance is alloted with an IP + You cannot apply IP Reservation if any instance is allotted with an IP address that is outside the Guest instance CIDR. 
- To reset an existing IP Reservation, apply IP reservation by diff --git a/source/adminguide/networking/isolation_in_advanced_zone_with_vlan.rst b/source/adminguide/networking/isolation_in_advanced_zone_with_vlan.rst index 46e438fd09..d1a0945aab 100644 --- a/source/adminguide/networking/isolation_in_advanced_zone_with_vlan.rst +++ b/source/adminguide/networking/isolation_in_advanced_zone_with_vlan.rst @@ -20,7 +20,7 @@ Isolation in Advanced Zone Using Private VLANs About PVLANs (Secondary VLANs) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The clasic use-case for PVLANs is a shared backup network, where you wish all users' +The classic use-case for PVLANs is a shared backup network, where you wish all users' hosts to be able to communicate with a backup host, but not with each other. |pvlans.png| diff --git a/source/adminguide/networking/security_groups.rst b/source/adminguide/networking/security_groups.rst index 968589bc0e..241ef1c1ff 100644 --- a/source/adminguide/networking/security_groups.rst +++ b/source/adminguide/networking/security_groups.rst @@ -115,9 +115,12 @@ In order for security groups to function in a zone, the security groups feature must first be enabled for the zone. The administrator can do this when creating a new zone, by selecting a network offering that includes security groups. The procedure is described in Basic Zone -Configuration in the Advanced Installation Guide. The administrator can -not enable security groups for an existing zone, only when creating a -new zone. +Configuration in the Advanced Installation Guide. + +To enable security groups for an existing advanced zone which doesn't have +security groups enabled, the administrator can enable the +`SecurityGroupProvider` for the physical network of the zone. This will allow +user to create networks with Security Groups. 
Adding Ingress and Egress Rules to a Security Group diff --git a/source/adminguide/networking/using_remote_access.rst b/source/adminguide/networking/using_remote_access.rst index 3af1db746a..9c30a33e0d 100644 --- a/source/adminguide/networking/using_remote_access.rst +++ b/source/adminguide/networking/using_remote_access.rst @@ -24,7 +24,7 @@ Using Remote Access VPN :local: :depth: 1 -Remote Access VPN connection to VPC or Guest Network to access Instances and applications. This section considers you have enabled Remote acccess VPN, refer to: :ref:`remote-access-vpn`. +Remote Access VPN connection to VPC or Guest Network to access Instances and applications. This section considers you have enabled Remote access VPN, refer to: :ref:`remote-access-vpn`. When connected to a VPC via VPN, the client have access to all Network Tiers. diff --git a/source/adminguide/networking/virtual_private_cloud_config.rst b/source/adminguide/networking/virtual_private_cloud_config.rst index 75faf7bdc0..6b33883a6f 100644 --- a/source/adminguide/networking/virtual_private_cloud_config.rst +++ b/source/adminguide/networking/virtual_private_cloud_config.rst @@ -214,7 +214,7 @@ addresses in the form of a Classless Inter-Domain Routing (CIDR) block. - **IPv6 DNS**: A set of custom IPv6 DNS that will be used by this VPC. If not provided then IPv6 DNS specified for the zone will be used. Available only when the selected VPC offering is IPv6 enabled and supports DNS service. - - **IPv4 address for the VR in this VPC**: The source NAT address or primary public Network address to use by the guest Networks. If not provided then a random address from the available pool of addresses wil be used. + - **IPv4 address for the VR in this VPC**: The source NAT address or primary public Network address to use by the guest Networks. If not provided then a random address from the available pool of addresses will be used. 
- **Public MTU**: The MTU to be configured on the public interfaces of the VPC Network's VR @@ -266,6 +266,10 @@ other Network Tiers within the VPC. - **Name**: A unique name for the Network Tier you create. + .. note:: + Admins can choose to automatically prepend the VPC name to the Tier name during creation + using global configurations "vpc.tier.name.prepend" and "vpc.tier.name.prepend.delimiter". + - **Network Offering**: The following default Network offerings are listed: Internal LB, DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, @@ -304,8 +308,8 @@ Configuring Network Access Control List ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: -Network Access Control Lists can only be created if the service -"NetworkACL" is supported by the created VPC. + Network Access Control Lists can only be created if the service + "NetworkACL" is supported by the created VPC. Define a Network Access Control List (ACL) to control incoming (ingress) and outgoing (egress) traffic between the associated Network Tier @@ -343,14 +347,14 @@ destination" and / or "allow all ingress source" rule to the ACL. Afterwards traffic can be white- or blacklisted. .. note:: -- ACL Rules in Cloudstack are stateful -- Source / Destination CIDRs are always external Networks -- ACL rules can also been seen on the virtual router of the VPC. Ingress - rules are listed in the table iptables table "filter" while egress rules - are placed in the "mangle" table -- ACL rules for ingress and egress are not correlating. For example a - egress "deny all" won't affect traffic in response to an allowed ingress - connection + - ACL Rules in Cloudstack are stateful + - Source / Destination CIDRs are always external Networks + - ACL rules can also been seen on the virtual router of the VPC. Ingress + rules are listed in the table iptables table "filter" while egress rules + are placed in the "mangle" table + - ACL rules for ingress and egress are not correlating. 
For example a + egress "deny all" won't affect traffic in response to an allowed ingress + connection Creating ACL Lists diff --git a/source/adminguide/networking/vnf_templates_appliances.rst b/source/adminguide/networking/vnf_templates_appliances.rst index fcc57bbac9..2ba354586a 100644 --- a/source/adminguide/networking/vnf_templates_appliances.rst +++ b/source/adminguide/networking/vnf_templates_appliances.rst @@ -15,14 +15,14 @@ VNF Templates and Appliances -======================= +============================ Virtualized Network Functions (VNFs) refers to virtualized software applications which offers network services, for example routers, firewalls, load balancers. Adding a VNF template from an URL -------- +----------------------------------------------------------- To create a VNF appliance, user needs to register a VNF template and add VNF settings. @@ -44,7 +44,7 @@ the same page or under Network -> VNF templates. Updating a VM template to VNF template -------- +----------------------------------------------------------- Users are able to update an existing VM template, which is uploaded from HTTP server or local, or created from volume, to be a VNF template. @@ -63,7 +63,7 @@ HTTP server or local, or created from volume, to be a VNF template. Updating the VNF settings of a VNF template -------------------- +----------------------------------------------------------- Users need to add the VNF nics and VNF details of the VNF templates. @@ -115,7 +115,7 @@ Users need to add the VNF nics and VNF details of the VNF templates. Deploying VNF appliances -------------------- +----------------------------------------------------------- #. Log in to the CloudStack UI as an administrator or end user. @@ -147,15 +147,15 @@ Deploying VNF appliances The following network rules will be applied. 
- If management network is an isolated network, CloudStack will acquire a public - IP, enable static nat on the VNF appliance, and create firewall rules to allow - traffic to ssh/http/https ports based on access_methods in VNF template details. + IP, enable static nat on the VNF appliance, and create firewall rules to allow + traffic to ssh/http/https ports based on access_methods in VNF template details. - If management network is a shared network with security groups, CloudStack will - create a new security group with rules to allow traffic to ssh/http/https ports - based on access_methods in VNF template details, and assign to the VNF appliance. + create a new security group with rules to allow traffic to ssh/http/https ports + based on access_methods in VNF template details, and assign to the VNF appliance. - If management network is a L2 network, VPC tier or Shared network without security - groups, no network rules will be configured. + groups, no network rules will be configured. #. Click on the "Launch VNF appliance" button diff --git a/source/adminguide/networking_and_traffic.rst b/source/adminguide/networking_and_traffic.rst index e2d794921f..b02d774182 100644 --- a/source/adminguide/networking_and_traffic.rst +++ b/source/adminguide/networking_and_traffic.rst @@ -33,6 +33,8 @@ providing networking features for guest traffic. .. include:: networking/multiple_guest_networks.rst +.. include:: networking/dynamic_static_routing.rst + .. include:: networking/network_permissions.rst .. include:: networking/ip_reservation_in_guest_networks.rst diff --git a/source/adminguide/projects.rst b/source/adminguide/projects.rst index 14fa8dc0db..65b0cd36e0 100644 --- a/source/adminguide/projects.rst +++ b/source/adminguide/projects.rst @@ -292,7 +292,7 @@ Working with Project Roles -------------------------- CloudStack allows adding project members with a desired project role. A project role will be assigned to the member in addition to their base -account role. 
Project Roles are retrictive in nature and can be used to +account role. Project Roles are restrictive in nature and can be used to further restrict certain API access to the members within the project. It is important to note that a project role cannot be used to elevate an existing user's permissions. Project roles can be created or managed diff --git a/source/adminguide/service_offerings.rst b/source/adminguide/service_offerings.rst index 9a3e690768..0a2662e1c1 100644 --- a/source/adminguide/service_offerings.rst +++ b/source/adminguide/service_offerings.rst @@ -212,7 +212,7 @@ To create a new compute offering: it enables the admin to set some boundaries. - **# of CPU cores**: The number of cores which should be allocated - to a system VM with this offering. If 'Custom constrained' is checked, the admin will + to the VM with this offering. If 'Custom constrained' is checked, the admin will be asked to enter the minimum and maximum number of CPUs that a user can request. If 'Custom unconstrained' is checked, this field does not appear as the user will be prompted to enter a value when creating their guest Instance. @@ -226,7 +226,7 @@ To create a new compute offering: will be prompted to enter a value when creating their guest Instance. - **Memory (in MB)**: The amount of memory in megabytes that the - system VM should be allocated. For example, “2048” would provide + VM should be allocated. For example, “2048” would provide a 2 GB RAM allocation. If 'Custom constrained' is selected, the admin will be asked to enter the minimum and maximum amount of RAM that a user can request. If 'Custom unconstrained' is selected, this field does @@ -238,7 +238,11 @@ To create a new compute offering: - **Network Rate**: Allowed data transfer rate in MB per second. - **Offer HA**: If yes, the administrator can choose to have the - system VM be monitored and as highly available as possible. + VM be monitored and as highly available as possible. + + .. 
note:: + The HA is offered when the VM High Availability manager is enabled in the zone using the setting 'vm.ha.enabled', by default this setting is enabled. + When disabled, alerts are sent during HA attempts when 'vm.ha.alerts.enabled' setting is enabled. - **Dynamic Scaling Enabled**: If yes, Instance can be dynamically scalable of cpu or memory @@ -319,6 +323,12 @@ To create a new compute offering: - **Storage Policy**: Name of the storage policy defined at vCenter, this is applicable only for VMware. When a specific Zone is selected, one of the storage policies can be selected from the list box. + - **Purge Resources**: Whether to cleanup instance and its associated resources from + database upon expunge. When set to true, the database records for the instances with the offering and its + associated resources such as volumes, NICs, etc will be purged immediately once the instance is + expunged. The duration between expunge and purging of the records can be controlled using + the global configuration - 'expunged.resource.purge.job.delay'. + - **Compute only Disk Offering**: When this flag is enabled, a compute only disk offering is created with the disk related information provided and then linked to the compute offering. Compute only disk offering is specific to the newly created compute offering to record the @@ -330,7 +340,7 @@ To create a new compute offering: - **Storage type**: The type of disk that should be allocated. Local allocates from storage attached directly to the host where the - system VM is running. Shared allocates from storage accessible via + VM is running. Shared allocates from storage accessible via NFS. - **Provisioning type**: The type of disk that should be allocated. @@ -362,7 +372,7 @@ To create a new compute offering: - **Custom IOPS** [1]_: If checked, the user can set their own IOPS. If not checked, the root administrator can define values.
If the root admin does not set values when using storage QoS, default values - are used (the defauls can be overridden if the proper parameters + are used (the defaults can be overridden if the proper parameters are passed into CloudStack when creating the primary storage in question). @@ -383,7 +393,7 @@ To create a new compute offering: disk that represents the root disk. This does not apply for KVM. - **Storage Tags**: The tags that should be associated with the - primary storage used by the system VM. + primary storage used by the VM. When the flag is disabled @@ -400,7 +410,7 @@ To create a new compute offering: -.. [1] These options are dependant on the capabilities of the hypervisor or the shared storage system which the instances are on. +.. [1] These options are dependent on the capabilities of the hypervisor or the shared storage system which the instances are on. If the hypervisor or underlying storage don't support a particular capability in the offering, the setting will have no effect. @@ -460,7 +470,7 @@ To create a new disk offering: - **Custom IOPS** [2]_: If checked, the user can set their own IOPS. If not checked, the root administrator can define values. If the root admin does not set values when using storage QoS, default values - are used (the defauls can be overridden if the proper parameters + are used (the defaults can be overridden if the proper parameters are passed into CloudStack when creating the primary storage in question). @@ -508,7 +518,7 @@ To create a new disk offering: #. Click Add. -.. [2] These options are dependant on the capabilities of the hypervisor or the shared storage system which the instances are on. +.. [2] These options are dependent on the capabilities of the hypervisor or the shared storage system which the instances are on. If the hypervisor or underlying storage don't support a particular capability in the offering, the setting will have no effect. 
@@ -593,6 +603,10 @@ To create a system service offering: - **Offer HA**: If yes, the administrator can choose to have the system VM be monitored and as highly available as possible. + .. note:: + The HA is offered when the VM High Availability manager is enabled in the zone using the setting 'vm.ha.enabled', by default this setting is enabled. + When disabled, alerts are sent during HA attempts when 'vm.ha.alerts.enabled' setting is enabled. + - **Storage Tags**: The tags that should be associated with the primary storage used by the system VM. @@ -612,13 +626,11 @@ To create a system service offering: Network Throttling ------------------ -Network throttling is the process of controlling the network access and -bandwidth usage based on certain rules. CloudStack controls this +Network throttling is the process of controlling the network bandwidth. CloudStack controls this behaviour of the guest networks in the cloud by using the network rate parameter. This parameter is defined as the default data transfer rate in Mbps (Megabits Per Second) allowed in a guest network. It defines the -upper limits for network utilization. If the current utilization is -below the allowed upper limits, access is granted, else revoked. +upper limits for network bandwidth. You can throttle the network bandwidth either to control the usage above a certain limit for some accounts, or to control network congestion in a @@ -647,22 +659,22 @@ on different types of networks in CloudStack. .. 
cssclass:: table-striped table-bordered table-hover -=========================================== =============================== -Networks Network Rate Is Taken from -=========================================== =============================== -Guest network of Virtual Router Guest Network Offering -Public network of Virtual Router Guest Network Offering -Storage network of Secondary Storage VM System Network Offering -Management network of Secondary Storage VM System Network Offering -Storage network of Console Proxy VM System Network Offering -Management network of Console Proxy VM System Network Offering -Storage network of Virtual Router System Network Offering -Management network of Virtual Router System Network Offering -Public network of Secondary Storage instance System Network Offering -Public network of Console Proxy instance System Network Offering -Default network of a guest instance Compute Offering -Additional networks of a guest instance Corresponding Network Offerings -=========================================== =============================== +============================================ =============================== +Networks Network Rate Is Taken from +============================================ =============================== +Guest network of Virtual Router Guest Network Offering +Public network of Virtual Router Guest Network Offering +Storage network of Secondary Storage VM System Network Offering +Management network of Secondary Storage VM System Network Offering +Storage network of Console Proxy VM System Network Offering +Management network of Console Proxy VM System Network Offering +Storage network of Virtual Router System Network Offering +Management network of Virtual Router System Network Offering +Public network of Secondary Storage instance System Network Offering +Public network of Console Proxy instance System Network Offering +Default network of a guest instance Compute Offering +Additional networks of a guest instance 
Corresponding Network Offerings +============================================ =============================== A guest instance must have a default network, and can also have many additional networks. Depending on various parameters, such as the host diff --git a/source/adminguide/storage.rst b/source/adminguide/storage.rst index cf76a581d6..c2b086b037 100644 --- a/source/adminguide/storage.rst +++ b/source/adminguide/storage.rst @@ -171,22 +171,31 @@ In order to use multiple local storage pools, you need to local.storage.uuid=a43943c1-1759-4073-9db1-bc0ea19203aa,f5b1220b-4446-42dc-a872-cffd281f9f8c local.storage.path=/var/lib/libvirt/images,/var/lib/libvirt/images2 -# #. Restart cloudstack-agent service - Storage pools will be automatically created in libvirt by the CloudStack agent Adding a Local Storage Pool via UI -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When using UI, ensure that the scope of the storage is set to "Host", and ensure that the protocol is set to "Filesystem". |adding-local-pool-via-ui.png| +Adding a Local Storage Pool via Command Line +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Using Cloudmonkey command line. + + .. parsed-literal:: + + cmk create storagepool zoneid=07d64765-3123-4fc2-b947-25d2c36f5bb4 name=test provider=DefaultPrimary podid=0af34b96-e88d-440e-a6bd-c4e8aab4aa4a clusterid=49db6a16-2f6c-4583-9d07-37ccceb248ae url=file://10.9.8.7/var/lib/libvirt/images2 + Changing the Scope of the Primary Storage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Scope of a Primary Storage can be changed from Zone-wide to Cluster-wide and vice versa when the Primary Storage is in Disabled state. An action button is displayed in UI for each Primary Storage in Disabled state. @@ -215,7 +224,7 @@ combinations: - KVM with NFS -- KVM wite CEPH/RBD +- KVM with CEPH/RBD - VMWare with NFS @@ -276,7 +285,7 @@ templates, and ISOs. 
Setting NFS Mount Options on the Storage Pool -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NFS mount options can be added while creating an NFS storage pool for KVM hosts. When the storage pool is mounted on the KVM hypervisor host, @@ -393,6 +402,7 @@ under "Browser" tab for a secondary storage. Read only ~~~~~~~~~ + Secondary storages can also be set to read-only in order to cordon it off from being used for storing any further Templates, Volumes and Snapshots. @@ -401,7 +411,7 @@ from being used for storing any further Templates, Volumes and Snapshots. cmk updateImageStore id=4440f406-b9b6-46f1-93a4-378a75cf15de readonly=true Direct resources to a specific secondary storage -~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, ACS allocates ISOs, volumes, snapshots, and templates to the freest secondary storage of the zone. In order to direct these resources to a specific secondary storage, the user can utilize the functionality of the dynamic secondary storage selectors using heuristic rules. This functionality utilizes JavaScript rules, defined by the user, to direct these resources to a specific secondary storage. When creating the heuristic rule, the script will have access to some preset variables with information about the secondary storage in the zone, about the resource the rule will be applied upon, and about the account that triggered the allocation. 
These variables are presented in the table below: @@ -409,39 +419,39 @@ By default, ACS allocates ISOs, volumes, snapshots, and templates to the freest | Resource | Variables | +===================================+===================================+ | Secondary Storage | ``id`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``name`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``usedDiskSize`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``totalDiskSize`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``protocol`` | +-----------------------------------+-----------------------------------+ | Snapshot | ``size`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``hypervisorType`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``name`` | +-----------------------------------+-----------------------------------+ | ISO/Template | ``format`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``hypervisorType`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``templateType`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``name`` | +-----------------------------------+-----------------------------------+ | Volume | ``size`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``format`` | +-----------------------------------+-----------------------------------+ | Account | ``id`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``name`` | - | +-----------------------------------| + | +-----------------------------------+ | | ``domain.id`` | - | +-----------------------------------| + | 
+-----------------------------------+ | | ``domain.name`` | +-----------------------------------+-----------------------------------+ @@ -722,7 +732,7 @@ may take several minutes for the volume to be moved to the new Instance. Instance Storage Migration -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~ Supported in XenServer, KVM, and VMware. @@ -771,7 +781,7 @@ There are two situations when you might want to migrate a disk: Migrating Storage For a Running Instance '''''''''''''''''''''''''''''''''''''''' -(Supported on XenServer and VMware) +(Supported on XenServer, KVM and VMware) #. Log in to the CloudStack UI as a user or admin. @@ -813,15 +823,17 @@ Migrating Storage and Attaching to a Different Instance Volume” <#attaching-a-volume>`_ -Migrating an Instance Root Volume to a New Storage Pool +Migrating an Instance Volume to a New Storage Pool ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -(XenServer, VMware) You can live migrate an Instance's root disk from one +(XenServer, VMware) You can live migrate an Instance's volumes from one storage pool to another, without stopping the Instance first. -(KVM) When migrating the root disk volume, the Instance must first be stopped, -and users can not access the Instance. After migration is complete, the Instance can -be restarted. +(KVM) KVM does not support volume live migration due to the limited possibility +to refresh VM XML domain. Therefore, to live migrate a volume between storage pools, +one must migrate the VM to a different host as well to force the VM XML domain update. +Use 'migrateVirtualMachineWithVolumes' instead or stop the Instance and then migrate +the volume. #. Log in to the CloudStack UI as a user or admin. @@ -1005,6 +1017,46 @@ True. Instances created from this service offering will have their disks reset upon reboot. See `“Creating a New Compute Offering” `_. 
+Volume delete protection +~~~~~~~~~~~~~~~~~~~~~~~~ + +CloudStack protects volumes from accidental deletion using a delete protection +flag, which is false by default. When delete protection is enabled for a volume, +it cannot be deleted through the UI or API. It can only be deleted after +removing delete protection from the volume. + +Delete protection can be enabled for a volume via updateVolume API. + +.. code:: bash + + cmk update volume id= deleteprotection=true + +To remove delete protection, use the following command: + +.. code:: bash + + cmk update volume id= deleteprotection=false + +To enable/disable delete protection for a volume using the UI, follow these steps: + +#. Log in to the CloudStack UI as a User or admin. + +#. In the navigation menu on the left, click Volumes under Storage. + +#. Choose the volume for which you want to enable/disable delete protection. + +#. Click on the Edit button |EditButton.png| + +#. Toggle the Delete Protection switch to enable or disable delete protection. + +#. Click Ok button to save the changes. + +.. note:: + The volume delete protection is only considered when the volume is being + deleted through the UI or via `deleteVolume` or `destroyVolume` API. If the + domain/project is deleted, the volumes under the domain/project will be + deleted irrespective of the delete protection status. + Volume Deletion and Garbage Collection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1043,7 +1095,7 @@ Volume statistics are collected on a regular interval (defined by global setting volume.stats.interval with a default of 600 seconds). This feature is currently only available for VMware and KVM. Volume stats include include bytes/s and IO/s statistics as shown in the -API output bellow. +API output below. .. code:: bash @@ -1059,6 +1111,7 @@ API output bellow. "diskkbsread": 343124, "diskkbswrite": 217619, ... + Bytes read/write, as well as the total IO/s, are exposed via UI, as shown in the image below. 
|volume-metrics.png| @@ -1117,12 +1170,12 @@ Following is the example for checkVolume API usage and the result in the volume Importing and Unmanaging Volumes from Storage Pools -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since Apache CloudStack 4.19.1.0, importing and unmanaging volumes from primary storage pools are supported. .. note:: - Currenty the supported storage types are: NFS, Ceph and Local storage for KVM hypervisor. + Currently the supported storage types are: NFS, Ceph and Local storage for KVM hypervisor. #. Log in to the CloudStack UI as an administrator. @@ -1245,11 +1298,18 @@ is running, the global setting 'kvm.snapshot.enabled' must be set to 'True'. The Volume Snapshot creation has changed in recent versions: -Under the hood, first, a full Instance Snapshot is taken - this means that during the taking of -the Instance Snapshot the Instance will be in the "Paused" state (while RAM memory is being written to the -QCOW2 file), which means that Instance will be unavailable from the Network point of view. -When the Instance Snapshot is created, Instance is unpaused/resumed, the single Volume Snapshot is exported -to the Secondary Storage, and then the Instance Snapshots is removed from the Instance. +When the VM is running, a disk-only VM snapshot is taken, exclusively for the volume in question. +If the VM is stopped, the volume will be converted (with qemu-img convert). The final storage location is +determined by the ``snapshot.backup.to.secondary`` configuration; if it is false the snapshot will be copied +to a different directory in the same primary storage as the volume; if it is true the snapshot will be copied +to the secondary storage. If the snapshot is being taken in a file-based storage (NFS, SharedMountPoint, Local), +it will be copied directly to its final storage location, according to the configuration. 
+ +Since 4.20.0.0, ACS supports incremental snapshots for the KVM hypervisor when using file-based storage (NFS, SharedMountPoint, Local), +to enable incremental snapshots the ``kvm.incremental.snapshot`` configuration must be enabled. Furthermore, in order to take incremental snapshots +the KVM host must have at least Libvirt version 7.6.0+ and qemu version 6.1+. The size of the snapshot chains +will be determined by the ``snapshot.delta.max`` configuration, which affects both KVM and XenServer snapshots. +More information on the incremental snapshot feature for KVM can be found in its `specification `_. Automatic Snapshot Creation and Retention @@ -1291,7 +1351,7 @@ incremental backups are supported, every N backup is a full backup. +------------------------------+------------------+------------------+-----+ | | VMware vSphere | Citrix XenServer | KVM | +==============================+==================+==================+=====+ -| Support incremental backup | No | Yes | No | +| Support incremental backup | No | Yes | Yes | +------------------------------+------------------+------------------+-----+ .. note:: @@ -1369,7 +1429,7 @@ Snapshot request fails and returns an error message. Snapshot Copy ~~~~~~~~~~~~~ -CloudStack allows copying an exisiting backed-up snapshot to multiple zones. +CloudStack allows copying an existing backed-up snapshot to multiple zones. Users can either use the UI in the snapshot details view or the `copySnapshot` API to copy a snapshot from one zone to other zone(s). Snapshot copies can be used for disastser recovery and creating volumes and templates in the @@ -1485,6 +1545,107 @@ Deleting objects from a bucket 2. Click on the |delete-button.png| button to delete the selected files from the bucket. +Shared FileSystems +------------------ + +CloudStack offers fully managed NFS Shared FileSystems to all users. 
+This section gives technical details on how to create/manage a Shared FileSystem +using basic lifecycle operations and also some implementation details. + +.. note:: + This feature is available only on advanced zones without security groups. + +Creating a New Shared FileSystem +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Log in to the CloudStack UI as a user or administrator. + +#. In the left navigation bar, click Storage. + +#. In the Select View, choose Shared FileSystems. + +Click on Create Shared FileSystem, provide the following details and then click OK. + +#. Name +#. Description +#. Zone +#. Format: Filesystem format (XFS, EXT4) which will be installed on the Shared FileSystem. +#. Network: Guest network to which the Shared FileSystem will be attached. +#. Compute offering: Offering using which the Shared FileSystem Instance will be deployed. +#. Disk offering: Offering used by the underlying data volume. +#. Size, MinIops and MaxIops: Displayed only when the disk offering takes custom size and custom iops. + +|create-sharedfs.png| + +Admins will see extra fields in the create form where they can specify the +account, domain and the project which will be owning the Shared FileSystem. +|create-sharedfs-admin.png| + +Access +~~~~~~ +The Shared FileSystem can be mounted by using the information given on the Access Tab. +|sharedfs-access-tab.png| + +Lifecycle Operations +~~~~~~~~~~~~~~~~~~~~ + +Supported lifecycle operations are : + +#. Update name and description of the Shared FileSystem + +#. Stop/Start Shared FileSystem - This will Stop and Start the Shared FileSystem Instance + +#. Restart Shared FileSystem - Reboots the Shared FileSystem Instance. If Cleanup option is provided then the + Instance state is cleaned up and restored to the original template. Configurations related to setting up the + NFS export will be done again. This will not affect the data on the volume attached to the Instance. + |restart-sharedfs.png| + +#. 
Change Disk Offering - The disk offering of the underlying volume can be changed. Whether live resize + is supported or not depends on the hypervisor. + Please note that the size of the Shared FileSystem can only be increased. + +#. Change Service Offering - The service offering of the Shared FileSystem Instance can be changed as required. + This can only be done when the Shared FileSystem is in Stopped state. + +#. Add/Remove Network - Guest networks can be added to or removed from the Shared FileSystem. + NFS share is exported to all networks. So instances on different networks can mount the + same share using the respective IP addresses as given on the Access tab. + APIs serving these operations are addNicToVirtualMachine and removeNicFromVirtualMachine + called with the Shared FileSystem Instance ID. + Please note that the added networks must not be on overlapping CIDR ranges. + |add-remove-sharedfs-network.png| + +#. Destroy Shared FileSystem - The Shared FileSystem will be destroyed. It can be recovered before it automatically gets expunged. + Expunge timeout is given by the global setting 'sharedfs.cleanup.delay'. + + +Shared FileSystem Instance +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The Shared FileSystem Instance is stateless and HA enabled. A new instance is deployed and will start +serving the NFS share if the host or VM goes down. +The VM is installed with the SystemVM template which is also used by the CPVM and SSVM. + +The Shared FileSystem Instance can be seen in the Instances Tab as well. Its name is prefixed by the string +"sharedfs-" plus the Shared FileSystem name. Actions that might interfere with Shared FileSystem operations are blocked or not shown. +Basic operations like Start, Stop and Reboot are allowed for troubleshooting. +Users can access the VM using the 'View Console' button for troubleshooting although it is not +required during normal operations. 
+ +Service Offering +~~~~~~~~~~~~~~~~ + +There are two global settings that control what should be the minimum RAM size and minimum +CPU count for the Shared FileSystem Instance : 'sharedfsvm.min.cpu.count' and 'sharedfsvm.min.ram.size'. +Only those offerings which meet these settings and have HA enabled are shown in the create form. + +Shared FileSystem Data Volume +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The data volume is also visible to the users. It is recommended to use the Shared FileSystem UI/API to +manage the data but users or admin can perform actions directly on the data volume or the root volume +as well if they wish. Attaching and detaching a disk is not allowed on a Shared FileSystem Instance. + .. |AttachDiskButton.png| image:: /_static/images/attach-disk-icon.png :alt: Attach Disk Button. .. |resize-volume-icon.png| image:: /_static/images/resize-volume-icon.png @@ -1515,6 +1676,8 @@ Deleting objects from a bucket :alt: Object store file upload .. |delete-button.png| image:: /_static/images/delete-button.png :alt: Delete button +.. |EditButton.png| image:: /_static/images/edit-icon.png + :alt: button to edit the properties of a volume .. |upload-button.png| image:: /_static/images/upload-button.png :alt: Upload button .. |adding-local-pool-via-ui.png| image:: /_static/images/adding-local-pool-via-ui.png @@ -1529,6 +1692,16 @@ Deleting objects from a bucket :alt: Import Volume .. |unmanage-volume.png| image:: /_static/images/unmanage-volume.png :alt: Unmanage Volume +.. |create-sharedfs.png| image:: /_static/images/create-sharedfs.png + :alt: Create Shared FileSystem +.. |create-sharedfs-admin.png| image:: /_static/images/create-sharedfs-admin.png + :alt: Create Shared FileSystem Admin Options +.. |restart-sharedfs.png| image:: /_static/images/restart-sharedfs.png + :alt: Restart Shared FileSystem +.. |sharedfs-access-tab.png| image:: /_static/images/sharedfs-access-tab.png + :alt: Shared FileSystem Access Tab +.. 
|add-remove-sharedfs-network.png| image:: /_static/images/add-remove-sharedfs-network.png + :alt: Shared FileSystem Networks .. |nfs-mount-options-create-zone-wizard.png| image:: /_static/images/nfs-mount-options-create-zone-wizard.png :alt: NFS mount options in create Zone wizard .. |nfs-mount-options-add-primary-storage.png| image:: /_static/images/nfs-mount-options-add-primary-storage.png diff --git a/source/adminguide/systemvm.rst b/source/adminguide/systemvm.rst index 756ae2f054..5ff7b97387 100644 --- a/source/adminguide/systemvm.rst +++ b/source/adminguide/systemvm.rst @@ -198,7 +198,7 @@ Console proxies can be restarted by administrators but this will interrupt existing console sessions for users. Creating an Instance Console Endpoint -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The access to an instance console is created by the API 'createConsoleEndpoint', for the instance specified in the parameter 'virtualmachineid'. By default, @@ -228,7 +228,7 @@ When ‘consoleproxy.extra.security.validation.enabled’ is false: then CloudSt does not require a token for validation. The websocket port is passed as a boot argument to the console proxy and the -management server decides between the secure or unsecure port (8443 or 8080) when +management server decides between the secure or insecure port (8443 or 8080) when setting the boot arguments for the CPVM. - The secure port 8443 is sent as a boot argument when: @@ -265,7 +265,7 @@ communication with SSL: Changing the Console Proxy SSL Certificate and Domains -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The administrator can configure SSL encryption by selecting a domain and uploading a new SSL certificate and private key. The domain must @@ -656,55 +656,55 @@ column in 'Failed'/'Passed' if there are health check failures of any type. 
Following global configs have been added for configuring health checks: - ``router.health.checks.enabled`` - If true, router health checks are allowed - to be executed and read. If false, all scheduled checks and API calls for on - demand checks are disabled. Default is true. + to be executed and read. If false, all scheduled checks and API calls for on + demand checks are disabled. Default is true. - ``router.health.checks.basic.interval`` - Interval in minutes at which basic - router health checks are performed. If set to 0, no tests are scheduled. Default - is 3 mins as per the pre 4.14 monitor services. + router health checks are performed. If set to 0, no tests are scheduled. Default + is 3 mins as per the pre 4.14 monitor services. - ``router.health.checks.advanced.interval`` - Interval in minutes at which - advanced router health checks are performed. If set to 0, no tests are scheduled. - Default value is 10 minutes. + advanced router health checks are performed. If set to 0, no tests are scheduled. + Default value is 10 minutes. - ``router.health.checks.config.refresh.interval`` - Interval in minutes at which - router health checks config - such as scheduling intervals, excluded checks, etc - is updated on virtual routers by the management server. This value should be - sufficiently high (like 2x) from the router.health.checks.basic.interval and - router.health.checks.advanced.interval so that there is time between new results - generation for passed data. Default is 10 mins. + router health checks config - such as scheduling intervals, excluded checks, etc + is updated on virtual routers by the management server. This value should be + sufficiently high (like 2x) from the router.health.checks.basic.interval and + router.health.checks.advanced.interval so that there is time between new results + generation for passed data. Default is 10 mins. 
- ``router.health.checks.results.fetch.interval`` - Interval in minutes at which - router health checks results are fetched by management server. On each result fetch, - management server evaluates need to recreate VR as per configuration of - 'router.health.checks.failures.to.recreate.vr'. This value should be sufficiently - high (like 2x) from the 'router.health.checks.basic.interval' and - 'router.health.checks.advanced.interval' so that there is time between new - results generation and fetch. + router health checks results are fetched by management server. On each result fetch, + management server evaluates need to recreate VR as per configuration of + 'router.health.checks.failures.to.recreate.vr'. This value should be sufficiently + high (like 2x) from the 'router.health.checks.basic.interval' and + 'router.health.checks.advanced.interval' so that there is time between new + results generation and fetch. - ``router.health.checks.failures.to.recreate.vr`` - Health checks failures defined - by this config are the checks that should cause router recreation. If empty the - recreate is not attempted for any health check failure. Possible values are comma - separated script names from systemvm’s /root/health_scripts/ (namely - cpu_usage_check.py, - dhcp_check.py, disk_space_check.py, dns_check.py, gateways_check.py, haproxy_check.py, - iptables_check.py, memory_usage_check.py, router_version_check.py), connectivity.test - or services (namely - loadbalancing.service, webserver.service, dhcp.service) + by this config are the checks that should cause router recreation. If empty the + recreate is not attempted for any health check failure. 
Possible values are comma + separated script names from systemvm’s /root/health_scripts/ (namely - cpu_usage_check.py, + dhcp_check.py, disk_space_check.py, dns_check.py, gateways_check.py, haproxy_check.py, + iptables_check.py, memory_usage_check.py, router_version_check.py), connectivity.test + or services (namely - loadbalancing.service, webserver.service, dhcp.service) - ``router.health.checks.to.exclude`` - Health checks that should be excluded when - executing scheduled checks on the router. This can be a comma separated list of - script names placed in the '/root/health_checks/' folder. Currently the following - scripts are placed in default systemvm Template - cpu_usage_check.py, - disk_space_check.py, gateways_check.py, iptables_check.py, router_version_check.py, - dhcp_check.py, dns_check.py, haproxy_check.py, memory_usage_check.py. + executing scheduled checks on the router. This can be a comma separated list of + script names placed in the '/root/health_checks/' folder. Currently the following + scripts are placed in default systemvm Template - cpu_usage_check.py, + disk_space_check.py, gateways_check.py, iptables_check.py, router_version_check.py, + dhcp_check.py, dns_check.py, haproxy_check.py, memory_usage_check.py. - ``router.health.checks.free.disk.space.threshold`` - Free disk space threshold - (in MB) on VR below which the check is considered a failure. Default is 100MB. + (in MB) on VR below which the check is considered a failure. Default is 100MB. - ``router.health.checks.max.cpu.usage.threshold`` - Max CPU Usage threshold as - % above which check is considered a failure. + % above which check is considered a failure. - ``router.health.checks.max.memory.usage.threshold`` - Max Memory Usage threshold - as % above which check is considered a failure. + as % above which check is considered a failure. The scripts for following health checks are provided in '/root/health_checks/'. 
These are not exhaustive and can be modified for covering other scenarios not covered. diff --git a/source/adminguide/templates.rst b/source/adminguide/templates.rst index ece4cd6a4f..5d5853d39b 100644 --- a/source/adminguide/templates.rst +++ b/source/adminguide/templates.rst @@ -136,7 +136,7 @@ in a private Zone, it is available only to Users in the domain assigned to that Zone. If a public Template is created in a public Zone, it is available to all Users in all domains. - +.. _creating-a-template-from-an-existing-virtual-machine: Creating a Template from an Existing Instance --------------------------------------------- @@ -268,6 +268,9 @@ To upload a Template: selecting an OS Type when registering a Template if the option 'Read Instance settings from OVA' is selected. In this case, the OS Type is obtained from the Template after it is registered. + - **Tag**: The tag for the template. This tag can be used with host tags to + allow deployment of Instances on specific hosts. + - **Userdata**: The registered Userdata are listed. Select the desired one. @@ -278,6 +281,8 @@ To upload a Template: - **Hypervisor**: The supported hypervisors are listed. Select the desired one. + - **Arch**: The supported arch types are listed. Select the desired one. + - **Format**. The format of the Template upload file, such as VHD or OVA. @@ -325,7 +330,7 @@ Example GUI dialog of uploading Template/ISO from local (browser) is given below |upload-iso-from-local.png| -Note that uploading multi-disk Templates is also supported. +Note that uploading multi-disk Templates is also supported as well as selecting the template/ISO arch type. Sharing Templates and ISOs with other Accounts/projects ------------------------------------------------------- @@ -542,6 +547,8 @@ part of a Template. prominent for Users to select. The ISO will appear in the Featured ISOs list. Only an administrator can make an ISO Featured. + - **Arch**: The supported arch types are listed. Select the desired one. 
+ #. Click OK. The Management Server will download the ISO. Depending on the size of @@ -594,4 +601,6 @@ Attaching an ISO to a Instance .. |template-permissions-update-4.png| image:: /_static/images/template-permissions-update-4.png :alt: Revoking permsissons from both projects previously added .. |template-permissions-update-5.png| image:: /_static/images/template-permissions-update-5.png - :alt: Reseting (removing all) permissions + :alt: Resetting (removing all) permissions +.. |iso.png| image:: /_static/images/iso-icon.png + :alt: depicts adding an iso image diff --git a/source/adminguide/templates/_bypass-secondary-storage-kvm.rst b/source/adminguide/templates/_bypass-secondary-storage-kvm.rst index 0aa5376a32..080e5ef9a9 100644 --- a/source/adminguide/templates/_bypass-secondary-storage-kvm.rst +++ b/source/adminguide/templates/_bypass-secondary-storage-kvm.rst @@ -49,7 +49,8 @@ From CloudStack 4.14.0, system VM Templates also support direct download. An adm Uploading Certificates for Direct Downloads -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + For direct downloads over HTTPS, the KVM hosts must have valid certificates. These certificates can be either self-signed or signed and will allow the KVM hosts to access the Templates/ISOs and download them. CloudStack provides some APIs to handle certificates for direct downloads: @@ -85,7 +86,7 @@ CloudStack provides some APIs to handle certificates for direct downloads: upload templatedirectdownloadcertificate hypervisor=KVM name=CERTIFICATE_ALIAS zoneid=ZONE_ID certificate=CERTIFICATE_FORMATTED hostid=HOST_ID Synchronising Certificates for Direct Downloads -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As new hosts may be added to a zone which do not include a certificate which was previously uploaded to pre-existing hosts. 
@@ -97,7 +98,7 @@ CloudStack provides a way to synchronize certificates across all the connected h - Upload missing certificates to hosts Direct Download Timeouts -~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^ With 4.14.0, ability to configure different timeout values for the direct downloading of Templates has been added. Three new global settings have been added for this: diff --git a/source/adminguide/templates/_cloud_init.rst b/source/adminguide/templates/_cloud_init.rst index 33e49683e7..13001f91e2 100644 --- a/source/adminguide/templates/_cloud_init.rst +++ b/source/adminguide/templates/_cloud_init.rst @@ -112,7 +112,7 @@ These features can be implemented in `“Linux Template creation process” <_cr If the cloud-init ssh module is set to run every boot, it will regenerate the certificate fingerprint of the host. This will cause a warning to anyone that logs in the system and also bring trouble to anyone trying to automate ssh access. - Disable cloud-init regenerating host certificates on boot. If Template certificates are deleted they will be regenerated by the OS on instnace first boot. + Disable cloud-init regenerating host certificates on boot. If Template certificates are deleted they will be regenerated by the OS on instance first boot. .. code:: bash @@ -176,7 +176,7 @@ These features can be implemented in `“Linux Template creation process” <_cr ~ CentOS - Centos 7 root volume is /dev/centos/root if no changes are done during installation. Change the value accordingly if setup is different. + CentOS root volume is /dev/centos/root if no changes are done during installation. Change the value accordingly if setup is different. .. code:: bash @@ -198,7 +198,7 @@ These features can be implemented in `“Linux Template creation process” <_cr .. warning:: - The example code above is based on XFS parition type. If ext4 partitioning is utilized replace **xfs_growfs** with **resize2fs** in the last code line. 
+ The example code above is based on XFS partition type. If ext4 partitioning is utilized replace **xfs_growfs** with **resize2fs** in the last code line. It is possible to also use cloud-init `resize2fs module `_ . - **Enable autoresize on every boot** @@ -215,6 +215,21 @@ These features can be implemented in `“Linux Template creation process” <_cr Cloud-init can parse and execute user-data form Cloud-stack during Instance creation. This feature works as is without additional configuration. +#. **Network configuration with ConfigDrive** + + Cloud-init can fetch network configuration from ConfigDrive. To enable this, + ensure network configuration is not disabled in cloud-init configuration. + + .. code:: bash + + echo -e "\nnetwork: {}" >> /etc/cloud/cloud.cfg + + .. note:: + Adding/removing nic to/from an instance or updating the ip address of a nic + will not be reflected in the instance if the instance is already running. To + do so, run `cloud-init clean --machine-id -s` to clean the machine id and + seed data. Then reboot the instance to apply the changes. + #. **Cleanup** Once desired cloud-init features are implemented, clean cloud-init tracker files. diff --git a/source/adminguide/templates/_create_linux.rst b/source/adminguide/templates/_create_linux.rst index 895e2e0909..8977fa5ddd 100644 --- a/source/adminguide/templates/_create_linux.rst +++ b/source/adminguide/templates/_create_linux.rst @@ -112,7 +112,7 @@ templating of Centos and Ubuntu. deluser myuser --remove-home - User password management and reset cappabilities in GUI are available with: + User password management and reset capabilities in GUI are available with: * `Cloud-init integration `_ * `Adding Password Management to Your Templates `_ /Legacy for non systemd systems only/ @@ -213,5 +213,4 @@ templating of Centos and Ubuntu. #. 
**Create the Template!** You are now ready to create the Final Template, for more information see - `“Creating a Template from an Existing Virtual - Machine” <#creating-a-template-from-an-existing-instance>`_. + :ref:`creating-a-template-from-an-existing-virtual-machine`. diff --git a/source/adminguide/templates/_create_windows.rst b/source/adminguide/templates/_create_windows.rst index eeeb8768ac..6a86934461 100644 --- a/source/adminguide/templates/_create_windows.rst +++ b/source/adminguide/templates/_create_windows.rst @@ -49,7 +49,7 @@ An overview of the procedure is as follows: System Preparation for Windows Server 2008 R2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For Windows 2008 R2, you run Windows System Image Manager to create a custom sysprep response XML file. Windows System Image Manager is @@ -156,7 +156,7 @@ Use the following steps to run sysprep for Windows 2008 R2: System Preparation for Windows Server 2003 R2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Earlier versions of Windows have a different sysprep tool. Follow these steps for Windows Server 2003 R2. diff --git a/source/adminguide/templates/_password.rst b/source/adminguide/templates/_password.rst index 3378acb50e..4b62b372ec 100644 --- a/source/adminguide/templates/_password.rst +++ b/source/adminguide/templates/_password.rst @@ -42,7 +42,7 @@ boot it will not set the password but boot will continue normally. 
Linux OS Installation -~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^ Use the following steps to begin the Linux OS installation: @@ -78,7 +78,7 @@ Use the following steps to begin the Linux OS installation: Windows OS Installation -~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^ Download the installer, CloudInstanceManager.msi, from the `Download page `_ diff --git a/source/adminguide/troubleshooting.rst b/source/adminguide/troubleshooting.rst index 7bb889fbec..20d6d6a870 100644 --- a/source/adminguide/troubleshooting.rst +++ b/source/adminguide/troubleshooting.rst @@ -241,7 +241,7 @@ load balancing rules so that they continue to function. Troubleshooting Internet Traffic -------------------------------- -Below are a few troubleshooting steps to check whats going wrong with your +Below are a few troubleshooting steps to check what's going wrong with your network... @@ -270,7 +270,7 @@ Trouble Shooting Steps If the pings dont work, run *tcpdump(8)* all over the place to check who is gobbling up the packets. Ultimately, if the switches are not - configured correctly, CloudStack networking wont work so fix the + configured correctly, CloudStack networking won't work so fix the physical networking issues before you proceed to the next steps #. Ensure `Traffic Labels `_ are set for the Zone. @@ -321,7 +321,7 @@ Trouble Shooting Steps #. KVM traffic labels require to be named as *"cloudbr0"*, *"cloudbr2"*, *"cloudbrN"* etc and the corresponding bridge must exist on the KVM hosts. If you create labels/bridges with any other names, CloudStack - (atleast earlier versions did) seems to ignore them. CloudStack does not + (at least earlier versions did) seems to ignore them. CloudStack does not create the physical bridges on the KVM hosts, you need to create them **before** before adding the host to Cloudstack. @@ -386,7 +386,7 @@ Trouble Shooting Steps Instances by default. Their public IPs will also be directly pingable from the Internet. 
Please note that these test would work only if your switches and traffic labels are configured correctly for your - environment. If your SSVM/CPVM cant reach the Internet, its very + environment. If your SSVM/CPVM can't reach the Internet, its very unlikely that the Virtual Router (VR) can also the reach the Internet suggesting that its either a switching issue or incorrectly assigned traffic labels. Fix the SSVM/CPVM issues before you debug VR issues. @@ -430,7 +430,7 @@ Trouble Shooting Steps round-trip min/avg/max/stddev = 28.098/44.021/69.179/17.998 ms #. However, the Virtual Router's (VR) Source NAT Public IP address - **WONT** be reachable until appropriate Ingress rules are + **WON'T** be reachable until appropriate Ingress rules are in place. You can add *Ingress* rules under *Network, Guest Network, IP Address, Firewall* setting page. @@ -451,6 +451,6 @@ Trouble Shooting Steps In a vast majority of the cases, the problem has turned out to be at the switching layer where the L3 switches were configured incorrectly. -This section was contibuted by Shanker Balan and was originally published on +This section was contributed by Shanker Balan and was originally published on `Shapeblue's blog `_ diff --git a/source/adminguide/tuning.rst b/source/adminguide/tuning.rst index c1fb354560..ae7450ef77 100644 --- a/source/adminguide/tuning.rst +++ b/source/adminguide/tuning.rst @@ -91,6 +91,25 @@ at `MySQL Reference Manual `_. +Selecting Database Connection Pool Library +------------------------------------------ + +CloudStack uses JDBC connection pool to manage and use database connections +in an optimal manner. It allows using either +`HikariCP `_ or +`DBCP 2 `_ based on the preference for +individual CloudStack databases - cloud, cloud_usage, simulator. 
+ +The following settings can be configured in the db.properties configuration +file for the management server or usage server: +db.cloud.connectionPoolLib +db.cloud_usage.connectionPoolLib +db.simulator.connectionPoolLib + +To use DBCP 2, the value for the configuration must be set to 'dbcp'. An +empty value or 'hikaricp' will allow using HikariCP. + + Monitor the Database Load ------------------------- @@ -143,3 +162,76 @@ Article `_.The article refers to XenServer 5.6, but the same information applies to XenServer 6 +Purging Expunged Resources +-------------------------- + +.. note:: + Currently only available for Instances and their linked resources. + +Over the time there are chances of piling up of millions of database records +for the removed or expunged resources. The presence of a lot of useless +records in the database can also affect the performance of the cloud so it is +needed to purge such entries in a systematic way. +CloudStack provides the following methods to allow purging of the expunged +resources and their database records: + +Using background task +~~~~~~~~~~~~~~~~~~~~~ + +A background task will run at regular intervals. The interval for the task and +other parameters for it such as resource types, start and end date and batch size +can also be controlled with the help of global settings. + +The following new global settings have been introduced which would allow +configuring background task for purging the expunged resources: + +.. cssclass:: table-striped table-bordered table-hover + +================================================ ================ =================================================================== +Global setting Default values Description +================================================ ================ =================================================================== +expunged.resources.purge.enabled false Whether to run a background task to purge the DB records of the expunged resources. 
+expunged.resources.purge.resources (empty) A comma-separated list of resource types that will be considered by the background task to purge the DB records of the expunged resources. Currently only VirtualMachine is supported. An empty value will result in considering all resource types for purging. +expunged.resources.purge.interval 86400 Interval (in seconds) for the background task to purge the DB records of the expunged resources. +expunged.resources.purge.delay 300 Initial delay (in seconds) to start the background task to purge the DB records of the expunged resources task. +expunged.resources.purge.batch.size 50 Batch size to be used during purging of the DB records of the expunged resources. +expunged.resources.purge.start.time (empty) Start time to be used by the background task to purge the DB records of the expunged resources. Use format yyyy-MM-dd or yyyy-MM-dd HH:mm:ss. +expunged.resources.purge.keep.past.days 30 The number of days in the past from the execution time of the background task to purge the DB records of the expunged resources for which the expunged resources must not be purged. To enable purging DB records of the expunged resource till the execution of the background task, set the value to zero. +================================================ ================ =================================================================== + + +Using API +~~~~~~~~~ + +An admin-only API `purgeExpungedResources` allows purging the expunged resources +with desired parameters. It will allow passing the following parameters - +resourcetype, batchsize, startdate, enddate. An example of purgeExpungedResources +API call is shown below: + + +.. 
parsed-literal:: + + > purge expungedresources startdate=2024-04-15 enddate=2024-04-20 resourcetype=VirtualMachine + { + "purgeexpungedresourcesresponse": { + "resourcecount": 6 + } + } + + +Using configuration in offerings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. note:: + Available only for service offerings + +The *purgeresources* configuration for offerings can be used to allow immediately +purging a resource when it is expunged. The configuration can be set to true or +false using the *purgeresources* parameter while creating the corresponding offerings. The +following global setting can be used to control the delay for purging the +resource after expunge: + +================================================ ================ =================================================================== +Global setting Default values Description +================================================ ================ =================================================================== +expunged.resource.purge.job.delay 180 Delay (in seconds) to execute the purging of the DB records of an expunged resource initiated by the configuration in the offering. Minimum value should be 180 seconds and if a lower value is set then the minimum value will be used. +================================================ ================ =================================================================== diff --git a/source/adminguide/ui.rst b/source/adminguide/ui.rst index 2591af71f6..33733f0e2a 100644 --- a/source/adminguide/ui.rst +++ b/source/adminguide/ui.rst @@ -192,12 +192,15 @@ apiBase Changes the suffix for the API endpoint docBase Changes the base URL for the documentation appTitle Changes the title of the portal footer Changes the footer text -loginFooter Configure to display text (HTML) in the footer at the login screen.
+loginFavicon Changes the favicon of the login page +loginFooter Configure to display text (HTML) in the footer at the login screen +loginTitle Changes the title of the login page logo Changes the logo top-left side image +minilogo Changes the logo top-left side image when menu is collapsed banner Changes the login banner image error.404 Changes the image of error Page not found error.403 Changes the image of error Forbidden -error.500 Changes the image of error Internal Server Error. +error.500 Changes the image of error Internal Server Error ============================= ================================================================ .. parsed-literal:: @@ -326,7 +329,7 @@ that have a title, text (description), link and icon. }, Contextual help documentation URLs can be customized with the help of `docBase` and `docHelpMappings` properties. -To override a particular documentation URL, a mapping can be added for the URL path in the config. A documentation URL is formed by combining the `docBase` URL base and a path set in the source code. Adding a mapping for any particular path in the configuration will result in generating documetation URL with overridden path. +To override a particular documentation URL, a mapping can be added for the URL path in the config. A documentation URL is formed by combining the `docBase` URL base and a path set in the source code. Adding a mapping for any particular path in the configuration will result in generating documentation URL with overridden path. By default, `docHelpMappings` lists all existing documentation URL suffixes, mapped to themselves, in the configuration file that are used in the code. .. 
parsed-literal:: @@ -367,7 +370,7 @@ By default, `docHelpMappings` lists all existing documentation URL suffixes, map "adminguide/networking_and_traffic.html#creating-a-vpn-gateway-for-the-vpc": "adminguide/networking_and_traffic.html#creating-a-vpn-gateway-for-the-vpc", "adminguide/networking_and_traffic.html#enabling-or-disabling-static-nat": "adminguide/networking_and_traffic.html#enabling-or-disabling-static-nat", "adminguide/networking_and_traffic.html#load-balancing-across-tiers": "adminguide/networking_and_traffic.html#load-balancing-across-tiers", - "adminguide/networking_and_traffic.html#releasing-an-ip-address-alloted-to-a-vpc": "adminguide/networking_and_traffic.html#releasing-an-ip-address-alloted-to-a-vpc", + "adminguide/networking_and_traffic.html#releasing-an-ip-address-allotted-to-a-vpc": "adminguide/networking_and_traffic.html#releasing-an-ip-address-allotted-to-a-vpc", "adminguide/networking_and_traffic.html#reserving-public-ip-addresses-and-vlans-for-accounts": "adminguide/networking_and_traffic.html#reserving-public-ip-addresses-and-vlans-for-accounts", "adminguide/networking_and_traffic.html#restarting-and-removing-a-vpn-connection": "adminguide/networking_and_traffic.html#restarting-and-removing-a-vpn-connection", "adminguide/networking_and_traffic.html#security-groups": "adminguide/networking_and_traffic.html#security-groups", @@ -477,19 +480,26 @@ Example for adding custom plugins: plugins: [ { "name": "ExamplePlugin", - "icon": "appstore", + "icon": "appstore-outlined", "path": "example.html" }, { "name": "ExamplePlugin1", - "icon": "appstore", + "icon": "appstore-outlined", "path": "https://cloudstack.apache.org/" } ] ... } -`icon` for the plugin can be chosen from Ant Design icons listed at `Icon - Ant Design Vue https://www.antdv.com/components/icon/`_. +`icon` for the plugin can be chosen from Ant Design icons listed at `https://3x.antdv.com/components/icon `_. + +.. warning:: + Not all ant icons are supported at the moment. 
You will find a list + within the github repository in ui/src/core/lazy_lib/icons_use.js. To use an icon you + need to transform the listed name. For example "PieChartOutlined" needs to be transformed + to "pie-chart-outlined", "ReadOutlined" needs to be transformed to "read-outlined". + For displaying a custom HTML in the plugin, HTML file can be stored in the CloudStack management server's web application directory on the server, i.e., */usr/share/cloudstack-management/webapp* and `path` can be set to the name of the file. For displaying a service or a web page, URL can be set as the `path` of the plugin. |ui-custom-plugin.png| diff --git a/source/adminguide/usage.rst b/source/adminguide/usage.rst index 6467b7ef5b..773c69325e 100644 --- a/source/adminguide/usage.rst +++ b/source/adminguide/usage.rst @@ -263,6 +263,23 @@ max.project.secondary.storage (GB) Maximum secondary storage space that can be Default is 400. =================================== ================================================================= +The administrator can also set limits for specific tagged host and storage +resources for an account or domain. Such tags must be specified in the following +global settings: + +- `resource.limit.host.tags` - A comma-separated list of tags for host resource limits. It applies to resource types - User VM, CPU, Memory. +- `resource.limit.storage.tags` - A comma-separated list of tags for storage resource limits. It applies to resource types - Volume, Primary storage. + +The limits for tagged resources are a subset of the overall limits and the maximum +can be the value of the overall limit for the particular resource type. + +|accountlimits.png| + +The administrator can view used and available capacity of such tagged resource +along with the overall capacities in the zone and cluster view in the UI. + +|zonecapacities.png| + User Permission ~~~~~~~~~~~~~~~ @@ -931,3 +948,7 @@ aggregation. ..
|editbutton.png| image:: /_static/images/edit-icon.png :alt: edits the settings. +.. |accountlimits.png| image:: /_static/images/account-limits.png + :alt: Configure account resource limits in UI. +.. |zonecapacities.png| image:: /_static/images/zone-capacities.png + :alt: Resource capacities for a zone. diff --git a/source/adminguide/veeam_plugin.rst b/source/adminguide/veeam_plugin.rst index 8afb80c26d..eaa63632ef 100644 --- a/source/adminguide/veeam_plugin.rst +++ b/source/adminguide/veeam_plugin.rst @@ -13,12 +13,12 @@ specific language governing permissions and limitations under the License. -.. _Veeam Backup and Recovery Plugin: +.. _Veeam Backup and Replication Plugin: -Veeam Backup and Recovery Plugin +Veeam Backup and Replication Plugin -================================= +=================================== -About the Veeam Backup and Recovery Plugin +About the Veeam Backup and Replication Plugin -------------------------------------------- +--------------------------------------------- @@ -44,10 +44,10 @@ There are a couple of important concepts to understand before working with the V remaining image(s) -Installing Veeam Backup and Recovery for use with CloudStack +Installing Veeam Backup and Replication for use with CloudStack -------------------------------------------------------------- +---------------------------------------------------------------- -The B&R Veeam plugin has been tested against Veeam Backup and Recovery 11 and 12. The +The B&R Veeam plugin has been tested against Veeam Backup and Replication 11 and 12. The enterprise edition is required for the Enterprise Manager API. The final tested version of Veeam was on a Windows Server 2019 (with desktop), although much of the development work was done against a Windows Server 2016 OS (with desktop).
@@ -129,6 +129,7 @@ Plug-in specific settings: ======================================= ======================== Configuration Description ======================================= ======================== +backup.framework.provider.plugin The backup and recovery provider plugin. Set this to 'veeam'. backup.plugin.veeam.url Veeam B&R server URL. Default: http://:9398/api/ backup.plugin.veeam.version Veeam B&R server version. CloudStack will get Veeam server version via PowerShell commands if it is 0 or not set backup.plugin.veeam.username Veeam B&R server username. Default: administrator diff --git a/source/adminguide/virtual_machines.rst b/source/adminguide/virtual_machines.rst index 468ec2c533..007c016743 100644 --- a/source/adminguide/virtual_machines.rst +++ b/source/adminguide/virtual_machines.rst @@ -136,8 +136,9 @@ To create an Instance from a Template: #. Select a Template or ISO. For more information about how the Templates came to be in this list, see `*Working with Templates* `_. -#. Be sure that the hardware you have allows starting the selected - service offering. +#. Select a service offering. Be sure that the hardware you have allows starting the selected + service offering. If the selected template has a tag associated with it + then only supported service offerings will be available for the selection. #. Select a disk offering. @@ -204,6 +205,19 @@ following techniques: updateVirtualMachine API. After installing the tools and updating the Instance, stop and start the Instance. +Instance Metadata +~~~~~~~~~~~~~~~~~ + +CloudStack provides different means for controlling an instance's metadata. + +- 'extraconfig' parameter of 'deployVirtualMachine' or 'updateVirtualMachine' API methods + can be used for setting different metadata parameters for an instance. +- Zone-level configurations - 'vm.metadata.manufacturer' and 'vm.metadata.product' can be used + to set the manufacturer and product respectively in the instance metadata.
However, a + custom value for these parameters may affect cloud-init functionality for the instance + when used with CloudStack datasource. One of the requirement for cloud-init functionality + to work with CloudStack datasource is that product value should contain 'CloudStack'. + Accessing Instances ------------------- @@ -275,6 +289,14 @@ CloudStack's database. The reboot process does not do this. When starting an Instance, admin Users have the option to specify a pod, cluster, or host. +.. note:: + When starting an instance, it's possible to specify a host for deployment, + even if the host's tags don't match the instance's tags. This can lead to a + mismatch between the VM's tags and the host's tags, which may not be + desirable. + + To avoid this, refer to the :ref:`strict-host-tags` section + Deleting Instance ------------------ @@ -415,6 +437,47 @@ The following table explains how an Instance name is displayed in different scen represents the value of the global configuration - instance.name +Instance delete protection +-------------------------- + +CloudStack protects instances from accidental deletion using a delete protection +flag, which is false by default. When delete protection is enabled for an +instance, it cannot be deleted through the UI or API. It can only be deleted +after removing delete protection from the instance. + +Delete protection can be enabled for an instance via updateVirtualMachine API. + +.. code:: bash + + cmk update virtualmachine id= deleteprotection=true + +To remove delete protection, use the following command: + +.. code:: bash + + cmk update virtualmachine id= deleteprotection=false + +To enable/disable delete protection for an instance using the UI, follow these steps: + +#. Log in to the CloudStack UI as a User or admin. + +#. In the navigation menu on the left, click Instances under Compute. + +#. Choose the Instance for which you want to enable/disable delete protection. + +#. 
Click on the Edit button |EditButton.png| + +#. Toggle the Delete Protection switch to enable or disable delete protection. + +#. Click Ok button to save the changes. + +.. note:: + The instance delete protection is only considered when the instance is being + deleted through the UI or via `destroyVirtualMachine` or `expungeVirtualMachine` + API. If the domain/project is deleted, the instances under the domain/project + will be deleted irrespective of the delete protection status. + + Changing the Service Offering for an Instance --------------------------------------------- @@ -440,6 +503,11 @@ Instance, you can change the Instance's compute offering. #. Click OK. +.. note:: + When changing the service offering for an instance, it's possible to have a + mismatch of host tags which can be problematic. + + For more information on how to prevent this, see :ref:`strict-host-tags`. .. _cpu-and-memory-scaling: @@ -473,8 +541,8 @@ Dynamic CPU and RAM scaling can be used in the following cases: update them using the following procedure. -Updating Existing Instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Enable Dynamic Scaling for Existing Instances +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are upgrading from a previous version of CloudStack, and you want your existing Instances created with previous versions to have the dynamic @@ -636,6 +704,12 @@ To manually live migrate an Instance where i in [0,..,N] and N = number of volumes of the Instance +.. note:: + During live migration, there can be a mismatch between the instance's tags + with the destination host's tags which might be undesirable. + + For more details on how to prevent this, see :ref:`strict-host-tags`. + Moving Instance's Volumes Between Storage Pools (offline volume Migration) -------------------------------------------------------------------------- @@ -974,7 +1048,7 @@ like many other resources in CloudStack. KVM supports Instance Snapshots when using NFS shared storage. 
If raw block storage is used (i.e. Ceph), then Instance Snapshots are not possible, since there is no possibility to write RAM memory content anywhere. In such cases you can use as an alternative -`Storage-based VM Snapshots on KVM`_ +:ref:`Storage-based-Instance-Snapshots-on-KVM`. If you need more information about Instance Snapshots on VMware, check out the @@ -983,7 +1057,7 @@ VMware documentation and the VMware Knowledge Base, especially `_. -.. _`Storage-based Instance Snapshots on KVM`: +.. _Storage-based-Instance-Snapshots-on-KVM: Storage-based Instance Snapshots on KVM --------------------------------------- diff --git a/source/adminguide/virtual_machines/importing_unmanaging_vms.rst b/source/adminguide/virtual_machines/importing_unmanaging_vms.rst index ec158385b7..b15c9db653 100644 --- a/source/adminguide/virtual_machines/importing_unmanaging_vms.rst +++ b/source/adminguide/virtual_machines/importing_unmanaging_vms.rst @@ -14,13 +14,13 @@ under the License. About Import Export Instances -------------------------- +----------------------------- For certain hypervisors, CloudStack supports importing of Instances from Managed Hosts, External Hosts, Local Storage and Shared Storage, into CloudStack. Manage or Unmanage Instances on Managed Hosts -------------------------- +--------------------------------------------- .. note:: This is currently only available for **vSphere** and **KVM** clusters. @@ -72,7 +72,7 @@ Listing unmanaged Instances --------------------------- Prerequisites to list unmanaged Instances (vSphere or KVM) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order for CloudStack to list the Instances that are not managed by CloudStack on a host/cluster, the instances must exist on the hosts that are already part to the CloudStack. 
@@ -148,7 +148,7 @@ importUnmanagedInstance API - **migrateallowed** (Instance and its volumes are allowed to migrate to different host/storage pool when offering tags conflict with host/storage pool) - **forced** (If true, an Instance is imported despite some of its NIC's MAC addresses being already present) -.. note:: The `forced` parameter is false by default and thus prevents importing an Instance which has a NIC containing a MAC address that has been previously assigned by CloudStack to another existing VM. If it is set to true, importing a VM with such already-used MAC addresses of the NICS will be allowed. This should be done with a full understanding of possible consequences due to duplicate MAC addresses. +.. note:: The `forced` parameter is false by default and thus prevents importing an Instance which has a NIC containing a MAC address that has been previously assigned by CloudStack to another existing VM. If it is set to true, importing a VM with such already-used MAC addresses of the NICS will be allowed, however, the original MAC address will be replaced with a newly generated MAC address. **Response**: @@ -407,7 +407,8 @@ Unmanaging Instance actions - For the Instance being unmanaged: stopped and destroyed usage events (similar to the generated usage events when expunging an Instance), with types: ‘VM.STOP’ and ‘VM.DESTROY', unless the instance has been already stopped before being unmanaged and in this case only ‘VM.DESTROY' is generated. Import Instances from External Hosts -------------------------- +------------------------------------ + .. note:: This is currently only available for **KVM** hypervisor. External Host @@ -496,7 +497,7 @@ choose the temporary storage location on the external host for the converted fil - **details** (Map for Instance details) - **forced** (If true, an Instance is imported despite some of its NIC's MAC addresses being already present) -.. 
note:: The `forced` parameter is false by default and thus prevents importing an Instance which has a NIC containing a MAC address that has been previously assigned by CloudStack to another existing VM. If it is set to true, importing a VM with such already-used MAC addresses of the NICS will be allowed. This should be done with a full understanding of possible consequences due to duplicate MAC addresses. +.. note:: The `forced` parameter is false by default and thus prevents importing an Instance which has a NIC containing a MAC address that has been previously assigned by CloudStack to another existing VM. If it is set to true, importing a VM with such already-used MAC addresses of the NICS will be allowed, however, the original MAC address will be replaced with a newly generated MAC address. **Response**: @@ -504,7 +505,7 @@ choose the temporary storage location on the external host for the converted fil Same response as that of deployVirtualMachine API. Import Instances from Local/Shared Storage ----------------------------------------- +------------------------------------------ .. note:: This is currently only available for **KVM** hypervisor. @@ -540,7 +541,7 @@ The importVm API is utilized to create instances using QCOW2 file from an existi Same response as that of deployVirtualMachine API. Import Instances from Shared Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The importVm API is utilized to create instances using QCOW2 file from an existing Shared Storage pool of a KVM cluster within the CloudStack infrastructure. Only NFS Storage Pool are supported. diff --git a/source/adminguide/virtual_machines/user-data.rst b/source/adminguide/virtual_machines/user-data.rst index 1c715e1235..9077dd9a55 100644 --- a/source/adminguide/virtual_machines/user-data.rst +++ b/source/adminguide/virtual_machines/user-data.rst @@ -45,7 +45,7 @@ To register a new userdata: .. 
image:: /_static/images/register_userdata.png :width: 400px :align: center - :alt: Regiser userdata dialog box + :alt: Register userdata dialog box If userdata content has variables declared in it, user can register the Userdata with userdata parameters. @@ -65,7 +65,7 @@ Userdata has to be registered with userdata parameter "variable1" like below .. image:: /_static/images/register_userdata_with_variables.png :width: 400px :align: center - :alt: Regiser userdata with variables dialog box + :alt: Register userdata with variables dialog box If the variables in userdata content are of a predefined metadata like "public_hostname" or "instance_id", then userdata parameters should not declare these variables. That is @@ -250,7 +250,7 @@ This example uses cloud-init to automatically update all OS packages on the firs package_upgrade: true EOF -#. Deploy an instance with this user-data either by providing the UUID of the registerd userdata +#. Deploy an instance with this user-data either by providing the UUID of the registered userdata or by providing base64 encoded userdata: .. code:: bash diff --git a/source/conceptsandterminology/concepts.rst b/source/conceptsandterminology/concepts.rst index 141ec0df70..f621ffaa6d 100644 --- a/source/conceptsandterminology/concepts.rst +++ b/source/conceptsandterminology/concepts.rst @@ -174,7 +174,7 @@ Resources within the cloud are managed as follows: - Pods: A pod is usually a rack, or row of racks that includes a layer-2 switch and one or more clusters. -- Clusters: A cluster consists of one or more homogenous hosts and primary +- Clusters: A cluster consists of one or more homogeneous hosts and primary storage. - Host: A single compute node within a cluster; often a hypervisor. @@ -535,6 +535,16 @@ Users can create buckets within the object storage pool. The basic storage units of Object Store are objects. Any type of data, regardless of content type, is stored as an object. Buckets are logical containers for storing objects. 
+About Shared FileSystems
+~~~~~~~~~~~~~~~~~~~~~~~~
+CloudStack users can set up CloudStack managed Shared FileSystems which can be mounted via NFS.
+Users can choose the service offering, disk offering, filesystem format and network.
+The Shared FileSystem is deployed on an Instance with the specified service offering.
+A data volume is created using the given disk offering and attached to the Instance.
+Users can specify which filesystem to use (XFS, EXT4).
+The filesystem is created on the data volume and exported via NFS.
+All Instances in the guest network can mount and read/write to the Shared FileSystem.
+
 About Physical Networks
 ~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/source/conceptsandterminology/locale/pot/administration_guide.pot b/source/conceptsandterminology/locale/pot/administration_guide.pot
index 6610222d29..19f84e4f2d 100644
--- a/source/conceptsandterminology/locale/pot/administration_guide.pot
+++ b/source/conceptsandterminology/locale/pot/administration_guide.pot
@@ -779,7 +779,7 @@ msgstr ""

 #: ../../administration_guide.rst:331
 # 3748d1e9df464087a1ffe1ff96240883
-msgid "Then you may power down the Host, re-use its IP address, re-install it, etc"
+msgid "Then you may power down the Host, reuse its IP address, re-install it, etc"
 msgstr ""

 #: ../../administration_guide.rst:334
@@ -995,7 +995,7 @@ msgstr ""

 #: ../../administration_guide.rst:434
 # 0e804d9f17db4c8a981a54fd41556317
-msgid "Fill in your desired over-provisioning multipliers in the fields CPU overcommit factor and RAM overcommit factor. The value which is intially shown in these fields is the default value inherited from the global configuration settings."
+msgid "Fill in your desired over-provisioning multipliers in the fields CPU overcommit factor and RAM overcommit factor. The value which is initially shown in these fields is the default value inherited from the global configuration settings."
msgstr "" #: ../../administration_guide.rst:437 diff --git a/source/conceptsandterminology/locale/pot/concepts.pot b/source/conceptsandterminology/locale/pot/concepts.pot index de31e099db..874ba784c0 100644 --- a/source/conceptsandterminology/locale/pot/concepts.pot +++ b/source/conceptsandterminology/locale/pot/concepts.pot @@ -248,7 +248,7 @@ msgstr "" #: ../../concepts.rst:134 # d33af50f42ee45deb987f9d2ade53d3b -msgid "Clusters: A cluster consists of one or more homogenous hosts and primary storage." +msgid "Clusters: A cluster consists of one or more homogeneous hosts and primary storage." msgstr "" #: ../../concepts.rst:135 diff --git a/source/conceptsandterminology/locale/pot/dev.pot b/source/conceptsandterminology/locale/pot/dev.pot index b83641edcd..a75b00a1cc 100644 --- a/source/conceptsandterminology/locale/pot/dev.pot +++ b/source/conceptsandterminology/locale/pot/dev.pot @@ -185,7 +185,7 @@ msgstr "" #: ../../dev.rst:136 # d443f5ac0cce419f98b4469aa4c74efc -msgid "To show how to sign a request, we will re-use the previous example." +msgid "To show how to sign a request, we will reuse the previous example." msgstr "" #: ../../dev.rst:140 diff --git a/source/conceptsandterminology/locale/pot/developer_guide.pot b/source/conceptsandterminology/locale/pot/developer_guide.pot index 0c8252b250..7dfd0042fc 100644 --- a/source/conceptsandterminology/locale/pot/developer_guide.pot +++ b/source/conceptsandterminology/locale/pot/developer_guide.pot @@ -323,7 +323,7 @@ msgstr "" #: ../../developer_guide.rst:334 # d99c1e63a66443539d663a0cba7beca0 -msgid "The Installing from source section will only get you to the point of runnign the management server, it does not get you any hypervisors. The simulator section gets you a simulated datacenter for testing. With DevCloud you can run at least one hypervisor and add it to your management server the way you would a real physical machine." 
+msgid "The Installing from source section will only get you to the point of running the management server, it does not get you any hypervisors. The simulator section gets you a simulated datacenter for testing. With DevCloud you can run at least one hypervisor and add it to your management server the way you would a real physical machine." msgstr "" #: ../../developer_guide.rst:340 @@ -443,12 +443,12 @@ msgstr "" #: ../../developer_guide.rst:460 # 7c104d4cd0fe475e863b91f53449a5c5 -msgid "The CloudStack API is a query based API using http that return results in XML or JSON. It is used to implement the default web UI. This API is not a standard like `OGF OCCI `__ or `DMTF CIMI `__ but is easy to learn. Mapping exists between the AWS API and the CloudStack API as will be seen in the next section. Recently a Google Compute Engine interface was also developed that maps the GCE REST API to the CloudStack API described here. The API `docs `__ are a good start to learn the extent of the API. Multiple clients exist on `github `__ to use this API, you should be able to find one in your favorite language. The reference documentation for the API and changes that might occur from version to version is availble `on-line `__. This short section is aimed at providing a quick summary to give you a base understanding of how to use this API. As a quick start, a good way to explore the API is to navigate the dashboard with a firebug console (or similar developer console) to study the queries." +msgid "The CloudStack API is a query based API using http that return results in XML or JSON. It is used to implement the default web UI. This API is not a standard like `OGF OCCI `__ or `DMTF CIMI `__ but is easy to learn. Mapping exists between the AWS API and the CloudStack API as will be seen in the next section. Recently a Google Compute Engine interface was also developed that maps the GCE REST API to the CloudStack API described here. 
The API `docs `__ are a good start to learn the extent of the API. Multiple clients exist on `GitHub `__ to use this API, you should be able to find one in your favorite language. The reference documentation for the API and changes that might occur from version to version is available `on-line `__. This short section is aimed at providing a quick summary to give you a base understanding of how to use this API. As a quick start, a good way to explore the API is to navigate the dashboard with a firebug console (or similar developer console) to study the queries." msgstr "" #: ../../developer_guide.rst:481 # 0bcb8dd851254f9b9b0240917b405d84 -msgid "In a succint statement, the CloudStack query API can be used via http GET requests made against your cloud endpoint (e.g http://localhost:8080/client/api). The API name is passed using the ``command`` key and the various parameters for this API call are passed as key value pairs. The request is signed using the access key and secret key of the user making the call. Some calls are synchronous while some are asynchronous, this is documented in the API `docs `__. Asynchronous calls return a ``jobid``, the status and result of a job can be queried with the ``queryAsyncJobResult`` call. Let's get started and give an example of calling the ``listUsers`` API in Python." +msgid "In a succinct statement, the CloudStack query API can be used via http GET requests made against your cloud endpoint (e.g http://localhost:8080/client/api). The API name is passed using the ``command`` key and the various parameters for this API call are passed as key value pairs. The request is signed using the access key and secret key of the user making the call. Some calls are synchronous while some are asynchronous, this is documented in the API `docs `__. Asynchronous calls return a ``jobid``, the status and result of a job can be queried with the ``queryAsyncJobResult`` call. 
Let's get started and give an example of calling the ``listUsers`` API in Python." msgstr "" #: ../../developer_guide.rst:493 @@ -458,7 +458,7 @@ msgstr "" #: ../../developer_guide.rst:504 # f06d2f79776845b69c69945a988dc02d -msgid "Open a Python shell and import the basic modules necessary to make the request. Do note that this request could be made many different ways, this is just a low level example. The ``urllib*`` modules are used to make the http request and do url encoding. The ``hashlib`` module gives us the sha1 hash function. It used to geenrate the ``hmac`` (Keyed Hashing for Message Authentication) using the secretkey. The result is encoded using the ``base64`` module." +msgid "Open a Python shell and import the basic modules necessary to make the request. Do note that this request could be made many different ways, this is just a low level example. The ``urllib*`` modules are used to make the http request and do url encoding. The ``hashlib`` module gives us the sha1 hash function. It used to generate the ``hmac`` (Keyed Hashing for Message Authentication) using the secretkey. The result is encoded using the ``base64`` module." msgstr "" #: ../../developer_guide.rst:524 @@ -483,12 +483,12 @@ msgstr "" #: ../../developer_guide.rst:586 # 34385cfcf355434bbfec59e948728211 -msgid "All the clients that you will find on github will implement this signature technique, you should not have to do it by hand. Now that you have explored the API through the UI and that you understand how to make low level calls, pick your favorite client of use `CloudMonkey `__. CloudMonkey is a sub-project of Apache CloudStack and gives operators/developers the ability to use any of the API methods. It has nice auto-completion and help feature as well as an API discovery mechanism since 4.2." +msgid "All the clients that you will find on GitHub will implement this signature technique, you should not have to do it by hand. 
Now that you have explored the API through the UI and that you understand how to make low level calls, pick your favorite client of use `CloudMonkey `__. CloudMonkey is a sub-project of Apache CloudStack and gives operators/developers the ability to use any of the API methods. It has nice auto-completion and help feature as well as an API discovery mechanism since 4.2." msgstr "" #: ../../developer_guide.rst:598 # 4bf1c04a89934aca9d24a5a585888739 -msgid "While the native CloudStack API is not a standard, CloudStack provides a AWS EC2 compatible interface. It has the great advantage that existing tools written with EC2 libraries can be re-used against a CloudStack based cloud. In the installation books we described how to run this interface from installing packages. In this section we show you how to compile the interface with ``maven`` and test it with Python boto module." +msgid "While the native CloudStack API is not a standard, CloudStack provides a AWS EC2 compatible interface. It has the great advantage that existing tools written with EC2 libraries can be reused against a CloudStack based cloud. In the installation books we described how to run this interface from installing packages. In this section we show you how to compile the interface with ``maven`` and test it with Python boto module." msgstr "" #: ../../developer_guide.rst:606 diff --git a/source/conceptsandterminology/locale/pot/networking.pot b/source/conceptsandterminology/locale/pot/networking.pot index edb353bc1d..8655112ad7 100644 --- a/source/conceptsandterminology/locale/pot/networking.pot +++ b/source/conceptsandterminology/locale/pot/networking.pot @@ -744,7 +744,7 @@ msgstr "" #: ../../networking/nicira-plugin.rst:7 # 0c134c3d04de4ba49f31e0c3f0108144 -msgid "The Nicira NVP plugin adds Nicira NVP as one of the available SDN implementations in CloudStack. 
With the plugin an exisiting Nicira NVP setup can be used by CloudStack to implement isolated guest networks and to provide additional services like routing and NAT." +msgid "The Nicira NVP plugin adds Nicira NVP as one of the available SDN implementations in CloudStack. With the plugin an existing Nicira NVP setup can be used by CloudStack to implement isolated guest networks and to provide additional services like routing and NAT." msgstr "" #: ../../networking/nicira-plugin.rst:13 @@ -1810,7 +1810,7 @@ msgstr "" #: ../../networking/vxlan.rst:152 # 6856ea7ca74549fb91a97823fda39f40 -msgid "This plugin requires an IPv4 address on the KVM host to terminate and originate VXLAN traffic. The address should be assinged to a physical interface or a bridge interface bound to a physical interface. Both a private address or a public address are fine for the purpose. It is not required to be in the same subnet for all hypervisors in a zone, but they should be able to reach each other via IP multicast with UDP/8472 port. A name of a physical interface or a name of a bridge interface bound to a physical interface can be used as a traffic label. Physical interface name fits for almost all cases, but if physical interface name differs per host, you may use a bridge to set a same name. If you would like to use a bridge name as a traffic label, you may create a bridge in this way." +msgid "This plugin requires an IPv4 address on the KVM host to terminate and originate VXLAN traffic. The address should be assigned to a physical interface or a bridge interface bound to a physical interface. Both a private address or a public address are fine for the purpose. It is not required to be in the same subnet for all hypervisors in a zone, but they should be able to reach each other via IP multicast with UDP/8472 port. A name of a physical interface or a name of a bridge interface bound to a physical interface can be used as a traffic label. 
Physical interface name fits for almost all cases, but if physical interface name differs per host, you may use a bridge to set a same name. If you would like to use a bridge name as a traffic label, you may create a bridge in this way." msgstr "" #: ../../networking/vxlan.rst:165 @@ -1866,7 +1866,7 @@ msgstr "" #: ../../networking/vxlan.rst:298 # 494b6216726d4b41ac1143c03c93ede2 -msgid "These iptable settings are not persistent accross reboots, we have to save them first." +msgid "These iptable settings are not persistent across reboots, we have to save them first." msgstr "" #: ../../networking/vxlan.rst:306 diff --git a/source/conceptsandterminology/network_setup.rst b/source/conceptsandterminology/network_setup.rst index 467130a270..eb4dd13425 100644 --- a/source/conceptsandterminology/network_setup.rst +++ b/source/conceptsandterminology/network_setup.rst @@ -668,7 +668,7 @@ offering as follows: #. Log in to the CloudStack UI as a user or admin. -#. Naviagte to Service Offerings and choose Network OfferingPublic IP Addresses. +#. Navigate to Service Offerings and choose Network OfferingPublic IP Addresses. #. Click Add Network Offering. diff --git a/source/conf.py b/source/conf.py index ae4b057a87..277e874e1e 100644 --- a/source/conf.py +++ b/source/conf.py @@ -24,9 +24,9 @@ author = 'Apache CloudStack Project' # The short X.Y version -version = '4.19' +version = '4.20' # The full version, including alpha/beta/rc tags -release = '4.19.1.0' +release = '4.20.0.0' rst_epilog = """ .. 
include:: /_global.rst
diff --git a/source/developersguide/ansible.rst b/source/developersguide/ansible.rst
index a91e71195a..f6bd73a0df 100644
--- a/source/developersguide/ansible.rst
+++ b/source/developersguide/ansible.rst
@@ -190,7 +190,7 @@ the file will look like this:

     yum: name=libselinux-python state=present

-  - name: Ensure cloudstack specfic my.cnf lines are present
+  - name: Ensure cloudstack specific my.cnf lines are present
     lineinfile: dest=/etc/my.cnf regexp=’$item’ insertafter=”symbolic-links=0″
 line=’$item’
@@ -294,6 +294,10 @@ For the management server role we create a main.yml task like this:

 Save this as `/etc/ansible/roles/cloudstack-management/tasks/main.yml`

+.. note:: In a production environment, selinux should be set to enforcing
+   and the necessary selinux policies should be created to allow the
+   services to run.
+
 Now we have some new elements to deal with. The Ansible Template module
 uses Jinja2 based templating.  As we’re doing a simplified example here,
 the Jinja Template for the cloudstack.repo won’t have any variables in
@@ -350,13 +354,13 @@ storage.  
The playbook for this would look as follows: - name: Seed secondary storage   command: - /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m {{ tmp\_nfs\_path }} -u http://download.cloud.com/templates/4.2/systemvmtemplate-2013-06-12-master-kvm.qcow2.bz2 -h kvm -F + /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m {{ tmp\_nfs\_path }} -u http://download.cloudstack.org/templates/4.2/systemvmtemplate-2013-06-12-master-kvm.qcow2.bz2 -h kvm -F   command: - /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m {{ tmp\_nfs\_path }} -u http://download.cloud.com/templates/4.2/systemvmtemplate-2013-07-12-master-xen.vhd.bz2 -h xenserver -F + /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m {{ tmp\_nfs\_path }} -u http://download.cloudstack.org/templates/4.2/systemvmtemplate-2013-07-12-master-xen.vhd.bz2 -h xenserver -F   command: - /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m {{ tmp\_nfs\_path }} -u http://download.cloud.com/templates/4.2/systemvmtemplate-4.2-vh7.ov -h vmware -F + /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m {{ tmp\_nfs\_path }} -u http://download.cloudstack.org/templates/4.2/systemvmtemplate-4.2-vh7.ov -h vmware -F Save this as `/etc/ansible/roles/cloudstack-manager/tasks/seedstorage.yml` diff --git a/source/developersguide/dev.rst b/source/developersguide/dev.rst index e85b943646..83847753a4 100644 --- a/source/developersguide/dev.rst +++ b/source/developersguide/dev.rst @@ -132,7 +132,7 @@ have both the API Key and Secret Key provided by the CloudStack administrator for your Account before proceeding with the signing process. -To show how to sign a request, we will re-use the previous example. +To show how to sign a request, we will reuse the previous example. .. 
parsed-literal:: diff --git a/source/developersguide/developer_guide.rst b/source/developersguide/developer_guide.rst index 262d06947f..9309c29c0f 100644 --- a/source/developersguide/developer_guide.rst +++ b/source/developersguide/developer_guide.rst @@ -204,7 +204,7 @@ Using Appliance for development ------------------------------- The Installing from source section will only get you to the point of -runnign the management server, it does not get you any hypervisors. The +running the management server, it does not get you any hypervisors. The simulator section gets you a simulated datacenter for testing. An appliance based development such as using ``mbx`` can allow you to run at least one hypervisor and add it to your management server the way you would a real physical machine. @@ -278,17 +278,17 @@ also developed that maps the GCE REST API to the CloudStack API described here. The API `docs `__ are a good start to learn the extent of the API. Multiple clients exist on -`github `__ +`GitHub `__ to use this API, you should be able to find one in your favorite language. The reference documentation for the API and changes that might -occur from version to version is availble +occur from version to version is available `on-line `__. This short section is aimed at providing a quick summary to give you a base understanding of how to use this API. As a quick start, a good way to explore the API is to navigate the dashboard with a firebug console (or similar developer console) to study the queries. -In a succint statement, the CloudStack query API can be used via http +In a succinct statement, the CloudStack query API can be used via http GET requests made against your cloud endpoint (e.g http://localhost:8080/client/api). The API name is passed using the ``command`` key and the various parameters for this API call are passed @@ -315,7 +315,7 @@ Open a Python shell and import the basic modules necessary to make the request. 
Do note that this request could be made many different ways, this is just a low level example. The ``urllib*`` modules are used to make the http request and do url encoding. The ``hashlib`` module gives -us the sha1 hash function. It used to geenrate the ``hmac`` (Keyed +us the sha1 hash function. It used to generate the ``hmac`` (Keyed Hashing for Message Authentication) using the secretkey. The result is encoded using the ``base64`` module. @@ -408,7 +408,7 @@ and the signature. Then do an http GET: } } -All the clients that you will find on github will implement this +All the clients that you will find on GitHub will implement this signature technique, you should not have to do it by hand. Now that you have explored the API through the UI and that you understand how to make low level calls, pick your favorite client of use diff --git a/source/developersguide/index.rst b/source/developersguide/index.rst index 85ac8821c0..842f4732f0 100644 --- a/source/developersguide/index.rst +++ b/source/developersguide/index.rst @@ -24,7 +24,7 @@ Developers Guide ================ -This is the Apache CloudStack developers guide. This section gives information for those wishing to develop CloudStack either contributing to the CloudStack core software or writing external plugins. Futher information can also be found at CloudStack's wiki https://cwiki.apache.org/confluence/display/CLOUDSTACK/Home and on the CloudStack mailing lists http://cloudstack.apache.org/mailing-lists.html +This is the Apache CloudStack developers guide. This section gives information for those wishing to develop CloudStack either contributing to the CloudStack core software or writing external plugins. Further information can also be found at CloudStack's wiki https://cwiki.apache.org/confluence/display/CLOUDSTACK/Home and on the CloudStack mailing lists http://cloudstack.apache.org/mailing-lists.html .. 
toctree:: :maxdepth: 2 diff --git a/source/installguide/building_from_source.rst b/source/installguide/building_from_source.rst index 87e720359b..0708418ee8 100644 --- a/source/installguide/building_from_source.rst +++ b/source/installguide/building_from_source.rst @@ -267,7 +267,7 @@ several other dependencies. Note that we recommend using Maven 3. While we have defined, and you have presumably already installed the bootstrap prerequisites, there are a number of build time prerequisites that need to be resolved. CloudStack uses maven for dependency -resolution. You can resolve the buildtime depdencies for CloudStack by +resolution. You can resolve the buildtime dependencies for CloudStack by running: .. parsed-literal:: @@ -483,9 +483,9 @@ Generating RPMs is done using the ``package.sh`` script: .. parsed-literal:: - $ ./package.sh -d centos63 + $ ./package.sh -d el8 -For other supported options(like centos7), run ``./package.sh --help`` +For other supported options, run ``./package.sh --help`` That will run for a bit and then place the finished packages in ``dist/rpmbuild/RPMS/x86_64/``. @@ -568,7 +568,7 @@ to build from source. page on the wiki. #. You may also need to download - `vhd-util `_, + `vhd-util `_, which was removed due to licensing issues. You'll copy vhd-util to the ``scripts/vm/hypervisor/xenserver/`` directory. diff --git a/source/installguide/configuration.rst b/source/installguide/configuration.rst index 5d11da5916..de57b0b7b3 100644 --- a/source/installguide/configuration.rst +++ b/source/installguide/configuration.rst @@ -277,7 +277,7 @@ and secondary storage. VPN, or load balancer support. - **Security Groups.** You can choose to enable Security Groups in your zone. - For further informations regarding Security Groups and there prequesits + For further information regarding Security Groups and there prequesits please refer to the Security Groups section in the documentation. #. 
The rest of the steps differ depending on whether you chose Basic or
@@ -671,7 +671,7 @@ Core Zone

   - **Host Name.** (Obligatory) The DNS name or IP address of the host.

-  - **Username.** (Obligatory) Username of a user who has administrator / root privilidges on
+  - **Username.** (Obligatory) Username of a user who has administrator / root privileges on
     the specified host (using Linux-hosts usually root).

   - **Password.** (Obligatory) This is the password for the user named above (from
@@ -807,7 +807,7 @@ To work with limited compute resources, an Edge zone will not deploy system VMs.

   - **Host Name.** (Obligatory) The DNS name or IP address of the host.

-  - **Username.** (Obligatory) Username of a user who has administrator / root privilidges on the specified host (using Linux-hosts usually root).
+  - **Username.** (Obligatory) Username of a user who has administrator / root privileges on the specified host (using Linux-hosts usually root).

   - **Authentication.** Atuthentication type used for the host, either Password or System SSH Key.
@@ -873,6 +873,9 @@ You need to tell CloudStack about the hosts that it will manage. Hosts exist
 inside clusters, so before you begin adding hosts to the cloud, you must
 add at least one cluster.

+.. note::
+   Since CloudStack 4.20.0, it is possible to specify the hosts' arch type which must be homogeneous within the cluster. AMD 64 bits (x86_64) and ARM 64 bits (aarch64) arch types are supported. The pre-existing clusters are set to arch type AMD 64 bits as well as new clusters in which the arch type is not specified.
+
 Add Cluster: KVM or XenServer
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -891,6 +894,8 @@ hosts and logged in to the CloudStack UI.

 #. Choose the hypervisor type for this cluster.

+#. Choose the arch type of the hosts within the cluster.
+
 #. Choose the pod in which you want to create the cluster.

 #. Enter a name for the cluster. 
This can be text of your choosing and @@ -1002,7 +1007,7 @@ XenServer and KVM hosts can be added to a cluster at any time. Requirements for XenServer and KVM Hosts -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +**************************************** .. warning:: Make sure the hypervisor host does not have any instances already running before @@ -1010,7 +1015,7 @@ Requirements for XenServer and KVM Hosts Configuration requirements: -- Each cluster must contain only hosts with the identical hypervisor. +- Each cluster must contain only hosts with the identical hypervisor and arch type. - For XenServer, do not put more than 8 hosts in a cluster. @@ -1019,9 +1024,11 @@ Configuration requirements: For hardware requirements, see the installation section for your hypervisor in the CloudStack Installation Guide. +.. note:: + Since CloudStack 4.20.0, the host arch type is auto detected when adding the host into CloudStack and it must match the cluster arch type for the operation to succeed. XenServer Host Additional Requirements -'''''''''''''''''''''''''''''''''''''' +************************************** If network bonding is in use, the administrator must cable the new host identically to other hosts in the cluster. @@ -1055,7 +1062,7 @@ bonds on the new hosts in the cluster. KVM Host Additional Requirements -'''''''''''''''''''''''''''''''' +******************************** - If shared mountpoint storage is in use, the administrator should ensure that the new host has all the same mountpoints (with storage @@ -1077,7 +1084,7 @@ KVM Host Additional Requirements defaults:cloudstack !requiretty Adding a XenServer Host -^^^^^^^^^^^^^^^^^^^^^^^ +*********************** #. If you have not already done so, install the hypervisor software on the host. 
You will need to know which version of the hypervisor @@ -1121,7 +1128,7 @@ Adding a XenServer Host Adding a KVM Host -^^^^^^^^^^^^^^^^^ +***************** The steps to add a KVM host are same as adding a XenServer Host as mentioned in the above section. @@ -1312,7 +1319,7 @@ ever one) CloudStack volume, so performance of the CloudStack volume does not vary depending on how heavily other tenants are using the system. -The createStoragePool API has been augmented to support plugable storage +The createStoragePool API has been augmented to support pluggable storage providers. The following is a list of parameters to use when adding storage to CloudStack that is based on the SolidFire plug-in: @@ -1384,7 +1391,7 @@ so performance of the CloudStack volume does not vary depending on how heavily o tenants are using the system. This volume migration is supported across PowerFlex storage pools only, which are on same or distinct storage instance. -The createStoragePool API has been augmented to support plugable storage +The createStoragePool API has been augmented to support pluggable storage providers. The following is a list of parameters to use when adding storage to CloudStack that is based on the PowerFlex plug-in: @@ -1406,7 +1413,7 @@ storage to CloudStack that is based on the PowerFlex plug-in: - url=[storage pool url] -The url parameter contains the PowerFlex storage pool details, specifed +The url parameter contains the PowerFlex storage pool details, specified in the following format: powerflex://:@/ @@ -1435,12 +1442,12 @@ leads to the dynamic creation of a StorPool volume, which has guaranteed performance. Such a StorPool volume is associated with one CloudStack volume, so performance of the CloudStack volume does not vary depending on how heavily other tenants are using the system. The volume migration is supported -accross non-managed storage pools (e.g. NFS/Local storage/Ceph) to StorPool, and -accross StorPool storage pools. 
+across non-managed storage pools (e.g. NFS/Local storage/Ceph) to StorPool, and +across StorPool storage pools. More technical details could be found on `StorPool Knowledge Base `_. -The createStoragePool API has been augmented to support plugable storage providers. +The createStoragePool API has been augmented to support pluggable storage providers. The following is a list of parameters to use when adding storage to CloudStack that is based on the StorPool plug-in: command=createStoragePool @@ -1482,7 +1489,7 @@ This documentation assumes you have the following configured in your environment - FiberChannel fabric and connectivity to every KVM host where volumes be attached to virtual machines. - Host definitions in the Primera Array that match the name of the hostwill in CloudStack. This can be fully-qualified or just the hostname. - Hostset defined to match the group of hosts associated with the Cloudstack cluster. -- Username and password to access the API with at least Edit privleges. +- Username and password to access the API with at least Edit privileges. - CPG (Common Provisioning Group) defined in the HPE Primera storage system where volumes and snapshots can be provisioned. When this storage pool is used with Compute or Disk Offerings, an administrator is @@ -1495,7 +1502,7 @@ HPE Primera Storage provider implementations, between HPE Primera Storage Pools NFS Storage Pools, and between other providers that support cross-provider volume migration. The createStoragePool API can be used to configure an HPE Primera storage pool with the -following paramaters: +following parameters: - command=createStoragePool - scope=[zone | cluster]. Note this must match your Hostset configuration (below) @@ -1505,10 +1512,10 @@ following paramaters: - name=[name for primary storage] - hypervisor=KVM - provider=Primera -- capacitybytes=The total capacity bytes avialable to the pool (before overprovisioning configuration is applied). 
If provided, this must be less than the total available capacity of the CPG on the storage system. If its not provided, defaults to the CPG maximum space. +- capacitybytes=The total capacity bytes available to the pool (before overprovisioning configuration is applied). If provided, this must be less than the total available capacity of the CPG on the storage system. If its not provided, defaults to the CPG maximum space. - url=[url to storage system] -The url parameter contains the HPE Primera storage pool details, specifed +The url parameter contains the HPE Primera storage pool details, specified in the following format: https://:@:/api/v1?cpg=&hostset=&api_skiptlsvalidation=" @@ -1526,7 +1533,7 @@ When a volume is created by the plugin, it will create bi-directional mappings i - vol: A root or data volume - snap: A snapshot volume - tpl: A template spooled to the storage device -- Each volume's description field in the HPE Primera storage system will have a formatted key/value pair with metadata mappings for the Cloudstack volume defintion (user volume name, volume uuid, account/project information) +- Each volume's description field in the HPE Primera storage system will have a formatted key/value pair with metadata mappings for the Cloudstack volume definition (user volume name, volume uuid, account/project information) - Each virtual volume's WWID will be stored in the volume's path field in Cloudstack Pure Flasharray API @@ -1540,7 +1547,7 @@ This documentation assumes you have the following configured in your environment - FiberChannel fabric and connectivity to every KVM host where volumes will be attached to virtual machines. - Host definitions in the Pure Flasharray that match the name of the host in CloudStack. This can be fully-qualified or just the hostname. - Hostgroup defined to match the group of hosts associated with the Cloudstack cluster. -- Username and password to access the API with at least Edit privleges. 
+- Username and password to access the API with at least Edit privileges. - Pure Flasharray pod defined in the HPE Primera storage system where volumes and snapshots can be provisioned. NOTE: This "pod" is not the same as a "pod" in Cloudstack. When this storage pool is used with Compute or Disk Offerings, an administrator is @@ -1553,7 +1560,7 @@ Pure Flasharray Storage provider implementations, between Pure Flasharray Storag NFS Storage Pools, and between other providers that support cross-provider volume migration. The createStoragePool API can be used to configure an Pure Flasharray storage pool with the -following paramaters: +following parameters: - command=createStoragePool - scope=[zone | cluster]. Note this must match your Hostset configuration (below) @@ -1566,7 +1573,7 @@ following paramaters: - capacitybytes=The total capacity bytes available to the pool (before overprovisioning configuration is applied). If provided, this must be less than the total available capacity of the Flasharray pod on the storage system. If its not provided, defaults to the Flasharray pod maximum space. 
- url=[url to storage system] -The url parameter contains the Pure Flasharray storage pool details, specifed +The url parameter contains the Pure Flasharray storage pool details, specified in the following format: https://:@:/api?pod=&hostgroup=&api_skiptlsvalidation=" @@ -1585,7 +1592,7 @@ When a volume is created by the plugin, it will create bi-directional mappings i - vol: A root or data volume - snap: A snapshot volume - tpl: A template spooled to the storage device -- Each volume's description field in the Pure Flasharray storage system will have a formatted key/value pair with metadata mappings for the Cloudstack volume defintion (user volume name, volume uuid, account/project information) +- Each volume's description field in the Pure Flasharray storage system will have a formatted key/value pair with metadata mappings for the Cloudstack volume definition (user volume name, volume uuid, account/project information) - Each virtual volume's WWID will be stored in the volume's path field in Cloudstack .. _add-secondary-storage: @@ -1712,7 +1719,7 @@ zone: - Path. The path to the zone's Secondary Staging Store. -Adding Object Storage +Add Object Storage ~~~~~~~~~~~~~~~~~~~~~~~~ You can add object storage pools at any time to add more capacity or providers to CloudStack diff --git a/source/installguide/hypervisor/hyperv.rst b/source/installguide/hypervisor/hyperv.rst index 792e51778b..d8a018074c 100644 --- a/source/installguide/hypervisor/hyperv.rst +++ b/source/installguide/hypervisor/hyperv.rst @@ -85,7 +85,7 @@ start: | | y | the file share for the Hyper-V deployment will be | | | | the new folder created in the \\Shares on the | | | | selected volume. You can create sub-folders for both | -| | | CloudStack Primary and Secondary storage within the | +| | | CloudStack Primary and Secondary storage within the | | | | share location. When you select the profile for the | | | | file shares, ensure that you select SMB Share | | | | -Applications. 
This creates the file shares with | @@ -99,17 +99,17 @@ start: +------------+----------+------------------------------------------------------+ | Virtual | | If you are using Hyper-V 2012 R2, manually create an | | Switch | | external virtual switch before adding the host to | -| | | CloudStack. If the Hyper-V host is added to the Hyper-V | -| | | manager, select the host, then click Virtual Switch | -| | | Manager, then New Virtual Switch. In the External | -| | | Network, select the desired NIC adapter and click | -| | | Apply. | +| | | CloudStack. If the Hyper-V host is added to the | +| | | Hyper-V manager, select the host, then click Virtual | +| | | Switch Manager, then New Virtual Switch. In the | +| | | External Network, select the desired NIC adapter and | +| | | click Apply. | | | | | | | | If you are using Windows 2012 R2, virtual switch is | | | | created automatically. | +------------+----------+------------------------------------------------------+ | Virtual | | Take a note of the name of the virtual switch. You | -| Switch | | need to specify that when configuring CloudStack | +| Switch | | need to specify that when configuring CloudStack | | Name | | physical network labels. | +------------+----------+------------------------------------------------------+ | Hyper-V | | - Add the Hyper-V domain users to the Hyper-V | @@ -122,13 +122,13 @@ start: | | | - This domain user should be part of the Hyper-V | | | | Administrators and Local Administrators group on | | | | the Hyper-V hosts that are to be managed by | -| | | CloudStack. | +| | | CloudStack. | | | | | | | | - The Hyper-V Agent service runs with the | | | | credentials of this domain user account. | | | | | | | | - Specify the credential of the domain user while | -| | | adding a host to CloudStack so that it can manage | +| | | adding a host to CloudStack so that it can manage | | | | it. 
| | | | | | | | - Specify the credential of the domain user while | @@ -152,6 +152,9 @@ start: | Dial-in | | | +------------+----------+------------------------------------------------------+ +.. NOTE: For this kind of content it might be better to use a CSV table: +.. https://docutils.sourceforge.io/docs/ref/rst/directives.html#csv-table + Hyper-V Installation Steps ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/source/installguide/hypervisor/kvm.rst b/source/installguide/hypervisor/kvm.rst index 05e11c291c..969730cc26 100644 --- a/source/installguide/hypervisor/kvm.rst +++ b/source/installguide/hypervisor/kvm.rst @@ -55,7 +55,7 @@ In addition, the following hardware requirements apply: - Within a single cluster, the hosts must be of the same distribution version. -- All hosts within a cluster must be homogenous. The CPUs must be of +- All hosts within a cluster must be homogeneous. The CPUs must be of the same type, count, and feature flags. - Must support HVM (Intel-VT or AMD-V enabled) @@ -90,7 +90,7 @@ host to work with CloudStack. .. warning:: Certain servers such as Dell provide the option to choose the Power Management Profile. The Active Power Controller enables Dell System DBPM (Demand Based Power Management) - which can restrict the visibility of the maximum CPU clock speed availble to the OS, + which can restrict the visibility of the maximum CPU clock speed available to the OS, which in turn can lead to CloudStack fetching the incorrect CPU speed of the server. To ensure that CloudStack can always fetch the maximum cpu speed on the server, ensure that "OS Control" is set as the Power Management Profile. @@ -160,11 +160,112 @@ KVM Instances. #. Repeat all of these steps on every hypervisor host. .. warning:: - CloudStack |version| requires Java 11 JRE. Installing CloudStack agent will - automatically install Java 11, but it's good to explicitly confirm that the Java 11 + CloudStack |version| requires Java 17 JRE. 
Installing CloudStack agent will + automatically install Java 17, but it's good to explicitly confirm that the Java 17 is the selected/active one (in case you had a previous Java version already installed) with ``alternatives --config java``, after CloudStack agent is installed. +Configure package repository +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +CloudStack is only distributed from source from the official mirrors. +However, members of the CloudStack community may build convenience +binaries so that users can install Apache CloudStack without needing to +build from source. + +If you didn't follow the steps to build your own packages from source in +the sections for `“Building RPMs from Source” +<../building_from_source.html#building-rpms-from-source>`__ or +`“Building DEB packages” <../building_from_source.html#building-deb-packages>`__ +you may find pre-built DEB and RPM packages for your convenience linked from +the `downloads `_ page. + +.. note:: + These repositories contain both the Management Server and KVM Hypervisor + packages. + +RPM package repository +~~~~~~~~~~~~~~~~~~~~~~ + +There is a RPM package repository for CloudStack so you can easily +install on RHEL and SUSE based platforms. + +If you're using an RPM-based system, you'll want to add the Yum +repository so that you can install CloudStack with Yum. + +In RHEL or CentOS: + +Yum repository information is found under ``/etc/yum.repos.d``. You'll +see several ``.repo`` files in this directory, each one denoting a +specific repository. + +To add the CloudStack repository, create +``/etc/yum.repos.d/cloudstack.repo`` and insert the following +information. + +In the case of RHEL being used, you can replace 'centos' by 'rhel' in the value of baseurl + +.. parsed-literal:: + + [cloudstack] + name=cloudstack + baseurl=http://download.cloudstack.org/centos/$releasever/|version|/ + enabled=1 + gpgcheck=0 + +Now you should now be able to install CloudStack using Yum. 
+ +In SUSE: + +Zypper repository information is found under ``/etc/zypp/repos.d/``. You'll +see several ``.repo`` files in this directory, each one denoting a +specific repository. + +To add the CloudStack repository, create +``/etc/zypp/repos.d/cloudstack.repo`` and insert the following +information. + +.. parsed-literal:: + + [cloudstack] + name=cloudstack + baseurl=http://download.cloudstack.org/suse/|version|/ + enabled=1 + gpgcheck=0 + + +Now you should now be able to install CloudStack using zypper. + + +DEB package repository +~~~~~~~~~~~~~~~~~~~~~~ + +You can add a DEB package repository to your apt sources with the +following commands. Replace the code name with your Ubuntu LTS version : +Ubuntu 20.04 (focal), Ubuntu 22.04 (jammy), Ubuntu 24.04 (noble). + +Use your preferred editor and open (or create) +``/etc/apt/sources.list.d/cloudstack.list``. Add the community provided +repository to the file (replace "trusty" with "xenial" or "bionic" if it is the case): + +.. parsed-literal:: + + deb https://download.cloudstack.org/ubuntu focal |version| + +We now have to add the public key to the trusted keys. + +.. parsed-literal:: + + wget -O - https://download.cloudstack.org/release.asc |sudo tee /etc/apt/trusted.gpg.d/cloudstack.asc + +Now update your local apt cache. + +.. parsed-literal:: + + sudo apt update + +Your DEB package repository should now be configured and ready for use. + Install and configure the Agent ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -284,7 +385,7 @@ Here are some examples: host-passthrough may lead to migration failure,if you have this problem, you should use host-model or custom. guest.cpu.features will force cpu features as a required policy so make sure to put only those features that are provided - by the host CPU. As your kvm cluster needs to be made up of homogenous nodes anyway + by the host CPU. 
As your kvm cluster needs to be made up of homogeneous nodes anyway (see System Requirements), it might make most sense to use guest.cpu.mode=host-model or guest.cpu.mode=host-passthrough. @@ -300,7 +401,7 @@ cloudstack-agent and should already be installed. planning to automate the deployment and configuration of your KVM hosts. #. To avoid potential security attack to Instances, We need to turn - off libvirt to listen on unsecure TCP port. CloudStack will automatically + off libvirt to listen on insecure TCP port. CloudStack will automatically set up cloud keystore and certificates when the host is added to cloudstack. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in @@ -371,6 +472,19 @@ cloudstack-agent and should already be installed. #LIBVIRTD_ARGS="--listen" + Configure libvirt to connect to libvirtd and not to per-driver daemons, especially important on newer distros such as EL9 and Ubuntu 24.04. + Edit ``/etc/libvirt/libvirt.conf`` and add the following: + + .. parsed-literal:: + remote_mode="legacy" + + On Ubuntu 24.04 or newer set libvirtd mode to traditional mode (see https://libvirt.org/manpages/libvirtd.html#system-socket-activation): + + .. parsed-literal:: + + systemctl mask libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tls.socket libvirtd-tcp.socket + + #. Restart libvirt In RHEL or CentOS or SUSE or Ubuntu: @@ -428,6 +542,10 @@ ensure the Agent has all the required permissions. $ setenforce permissive +.. note:: In a production environment, selinux should be set to enforcing + and the necessary selinux policies are created to allow the + services to run. + #. Configure Apparmor (Ubuntu) @@ -491,7 +609,7 @@ There are many ways to configure your networking. Even within the scope of a giv network mode. Below are a few simple examples. .. 
note:: - Since Ubuntu 20.04 the standard for manging network connections is by + Since Ubuntu 20.04 the standard for managing network connections is by using NetPlan YAML files. Please refer to the Ubuntu man pages for further information and set up network connections figuratively. @@ -1378,7 +1496,7 @@ extra ports by executing the following iptable commands: $ iptables -I INPUT -p tcp -m tcp --dport 49152:49216 -j ACCEPT -These iptable settings are not persistent accross reboots, we have to +These iptable settings are not persistent across reboots, we have to save them first. .. parsed-literal:: @@ -1483,7 +1601,7 @@ perform. In case of KVM, UEFI enabled hypervisor hosts must have the ``ovmf`` or ``edk2-ovmf`` package installed. -You can find further informations regarding prerequisites at the CloudStack Wiki +You can find further information regarding prerequisites at the CloudStack Wiki (https://cwiki.apache.org/confluence/display/CLOUDSTACK/Enable+UEFI+booting+for+Instance) as well as limitations for using UEFI in CloudStack. diff --git a/source/installguide/hypervisor/lxc.rst b/source/installguide/hypervisor/lxc.rst index 03948bdcf7..e4bae66a3a 100644 --- a/source/installguide/hypervisor/lxc.rst +++ b/source/installguide/hypervisor/lxc.rst @@ -49,7 +49,7 @@ In addition, the following hardware requirements apply: - Within a single cluster, the hosts must be of the same distribution version. -- All hosts within a cluster must be homogenous. The CPUs must be of +- All hosts within a cluster must be homogeneous. The CPUs must be of the same type, count, and feature flags. - Must support HVM (Intel-VT or AMD-V enabled) @@ -158,7 +158,7 @@ In Ubuntu: $ apt-get install cloudstack-agent -Next step is to update the Agent configuration setttings. The settings +Next step is to update the Agent configuration settings. The settings are in ``/etc/cloudstack/agent/agent.properties`` #. 
Set the Agent to run in LXC mode: @@ -196,7 +196,7 @@ vital that libvirt is configured correctly. Libvirt is a dependency of cloudstack-agent and should already be installed. #. In order to have live migration working libvirt has to listen for - unsecured TCP connections. We also need to turn off libvirts attempt + insecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in ``/etc/libvirt/libvirtd.conf`` @@ -319,6 +319,10 @@ ensure the Agent has all the required permissions. $ setenforce permissive +.. note:: In a production environment, selinux should be set to enforcing + and the necessary selinux policies are created to allow the + services to run. + #. Configure Apparmor (Ubuntu) #. Check to see whether AppArmor is installed on your machine. If @@ -621,7 +625,7 @@ extra ports by executing the following iptable commands: $ iptables -I INPUT -p tcp -m tcp --dport 49152:49216 -j ACCEPT -These iptable settings are not persistent accross reboots, we have to +These iptable settings are not persistent across reboots, we have to save them first. .. parsed-literal:: diff --git a/source/installguide/hypervisor/vsphere.rst b/source/installguide/hypervisor/vsphere.rst index 391a1cf74e..64aaae18eb 100644 --- a/source/installguide/hypervisor/vsphere.rst +++ b/source/installguide/hypervisor/vsphere.rst @@ -28,7 +28,7 @@ System Requirements for vSphere Hosts Software requirements: ^^^^^^^^^^^^^^^^^^^^^^ -- vSphere and vCenter, versions 6.0, 6.5 or 6.7. +- vSphere and vCenter (see `"Supported Hypervisor Versions" <../../releasenotes/compat.html#supported-hypervisor-versions>`_) vSphere Standard is recommended. Note however that customers need to consider the CPU constraints in place with vSphere licensing. See @@ -74,7 +74,7 @@ Hardware requirements: - All hosts must be 64-bit and must support HVM (Intel-VT or AMD-V enabled). -- All hosts within a cluster must be homogenous. 
That means the CPUs +- All hosts within a cluster must be homogeneous. That means the CPUs must be of the same type, count, and feature flags. - 64-bit x86 CPU (more cores results in better performance) @@ -122,7 +122,7 @@ Other requirements: - vCenter must be configured to use the standard port 443 so that it can communicate with the CloudStack Management Server. -- You must re-install VMware ESXi if you are going to re-use a host +- You must re-install VMware ESXi if you are going to reuse a host from a previous install. - CloudStack requires VMware vSphere 6.0, 6.5 or 6.7. VMware vSphere 5.5 and older @@ -575,13 +575,13 @@ these credentials while configuring Nexus virtual switch. **Management IP Address** This is the IP address of the VSM appliance. This is the IP address you -specify in the virtual switch IP Address field while configuting Nexus virtual +specify in the virtual switch IP Address field while configuring Nexus virtual switch. **SSL** Should be set to Enable.Always enable SSL. SSH is usually enabled by default during the VSM installation. However, check whether the SSH connection to the -VSM is working, without which CloudStack failes to connect to the VSM. +VSM is working, without which CloudStack fails to connect to the VSM. Creating a Port Profile diff --git a/source/installguide/hypervisor/xenserver.rst b/source/installguide/hypervisor/xenserver.rst index 1d51145e83..9d31f68a10 100644 --- a/source/installguide/hypervisor/xenserver.rst +++ b/source/installguide/hypervisor/xenserver.rst @@ -44,7 +44,7 @@ System Requirements for XenServer Hosts - XCP-ng 8.2.0 -- You must re-install Citrix XenServer if you are going to re-use a +- You must re-install Citrix XenServer if you are going to reuse a host from a previous install. 
- Must support HVM (Intel-VT or AMD-V enabled) @@ -197,18 +197,18 @@ CSP functionality is already present in XenServer 6.1 For XenServer 6.0.2: - `http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz - `_ + `http://download.cloudstack.org/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz + `_ For XenServer 5.6 SP2: - `http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz - `_ + `http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz + `_ For XenServer 6.0: - `http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz - `_ + `http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz + `_ #. Extract the file: diff --git a/source/installguide/locale/pot/building_from_source.pot b/source/installguide/locale/pot/building_from_source.pot index 14333758ee..8cd704cf30 100644 --- a/source/installguide/locale/pot/building_from_source.pot +++ b/source/installguide/locale/pot/building_from_source.pot @@ -233,7 +233,7 @@ msgstr "" #: ../../building_from_source.rst:194 # 283980d16b48466bb2a2d3b17ff1fede -msgid "While we have defined, and you have presumably already installed the bootstrap prerequisites, there are a number of build time prerequisites that need to be resolved. CloudStack uses maven for dependency resolution. You can resolve the buildtime depdencies for CloudStack by running:" +msgid "While we have defined, and you have presumably already installed the bootstrap prerequisites, there are a number of build time prerequisites that need to be resolved. CloudStack uses maven for dependency resolution. You can resolve the buildtime dependencies for CloudStack by running:" msgstr "" #: ../../building_from_source.rst:204 @@ -418,7 +418,7 @@ msgstr "" #: ../../building_from_source.rst:446 # d9d9d34be2604c18a3b9107dbf384181 -msgid "You may also need to download `vhd-util `_, which was removed due to licensing issues. You'll copy vhd-util to the ``scripts/vm/hypervisor/xenserver/`` directory." 
+msgid "You may also need to download `vhd-util `_, which was removed due to licensing issues. You'll copy vhd-util to the ``scripts/vm/hypervisor/xenserver/`` directory." msgstr "" #: ../../building_from_source.rst:451 diff --git a/source/installguide/locale/pot/configuration.pot b/source/installguide/locale/pot/configuration.pot index 9c4db09afe..17ba070c69 100644 --- a/source/installguide/locale/pot/configuration.pot +++ b/source/installguide/locale/pot/configuration.pot @@ -1674,7 +1674,7 @@ msgstr "" #: ../../configuration.rst:1264 # febca3ed36dc41f0bfe48a3170c1b282 -msgid "The createStoragePool API has been augmented to support plugable storage providers. The following is a list of parameters to use when adding storage to CloudStack that is based on the SolidFire plug-in:" +msgid "The createStoragePool API has been augmented to support pluggable storage providers. The following is a list of parameters to use when adding storage to CloudStack that is based on the SolidFire plug-in:" msgstr "" #: ../../configuration.rst:1268 diff --git a/source/installguide/locale/pot/hypervisor/kvm.pot b/source/installguide/locale/pot/hypervisor/kvm.pot index 7b75d4ce44..6a0a74a396 100644 --- a/source/installguide/locale/pot/hypervisor/kvm.pot +++ b/source/installguide/locale/pot/hypervisor/kvm.pot @@ -83,7 +83,7 @@ msgstr "" #: ../../hypervisor/kvm.rst:52 # fc001eaf8fb842d7adbbf5bb977be8fd -msgid "All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags." +msgid "All hosts within a cluster must be homogeneous. The CPUs must be of the same type, count, and feature flags." msgstr "" #: ../../hypervisor/kvm.rst:55 @@ -320,7 +320,7 @@ msgstr "" #: ../../hypervisor/kvm.rst:243 # ed8dd766459147cbb85ff50d90c4b80e -msgid "In order to have live migration working libvirt has to listen for unsecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. 
Both of these settings are in ``/etc/libvirt/libvirtd.conf``" +msgid "In order to have live migration working libvirt has to listen for insecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in ``/etc/libvirt/libvirtd.conf``" msgstr "" #: ../../hypervisor/kvm.rst:248 @@ -735,7 +735,7 @@ msgstr "" #: ../../hypervisor/kvm.rst:836 # f47cb1aac5b0422ea1409fd01b64e2db -msgid "These iptable settings are not persistent accross reboots, we have to save them first." +msgid "These iptable settings are not persistent across reboots, we have to save them first." msgstr "" #: ../../hypervisor/kvm.rst:845 diff --git a/source/installguide/locale/pot/hypervisor/lxc.pot b/source/installguide/locale/pot/hypervisor/lxc.pot index 81fef09a88..ec6e8ef5fa 100644 --- a/source/installguide/locale/pot/hypervisor/lxc.pot +++ b/source/installguide/locale/pot/hypervisor/lxc.pot @@ -80,7 +80,7 @@ msgstr "" #: ../../hypervisor/lxc.rst:52 # 9a162b24f7fd499c9793ac22d8cd360f -msgid "All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags." +msgid "All hosts within a cluster must be homogeneous. The CPUs must be of the same type, count, and feature flags." msgstr "" #: ../../hypervisor/lxc.rst:55 @@ -230,7 +230,7 @@ msgstr "" #: ../../hypervisor/lxc.rst:160 # 04df1ba82d804c3e93844dbf7cdd829d -msgid "Next step is to update the Agent configuration setttings. The settings are in ``/etc/cloudstack/agent/agent.properties``" +msgid "Next step is to update the Agent configuration settings. The settings are in ``/etc/cloudstack/agent/agent.properties``" msgstr "" #: ../../hypervisor/lxc.rst:163 @@ -262,7 +262,7 @@ msgstr "" #: ../../hypervisor/lxc.rst:197 # f54bd7715b5441f687683613bff00956 -msgid "In order to have live migration working libvirt has to listen for unsecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. 
Both of these settings are in ``/etc/libvirt/libvirtd.conf``" +msgid "In order to have live migration working libvirt has to listen for insecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in ``/etc/libvirt/libvirtd.conf``" msgstr "" #: ../../hypervisor/lxc.rst:202 @@ -591,7 +591,7 @@ msgstr "" #: ../../hypervisor/lxc.rst:623 # d20a63121d8d4a90b51c2e5eb1dc1b8e -msgid "These iptable settings are not persistent accross reboots, we have to save them first." +msgid "These iptable settings are not persistent across reboots, we have to save them first." msgstr "" #: ../../hypervisor/lxc.rst:632 diff --git a/source/installguide/locale/pot/hypervisor/vsphere.pot b/source/installguide/locale/pot/hypervisor/vsphere.pot index 2935e3a990..c3389a1553 100644 --- a/source/installguide/locale/pot/hypervisor/vsphere.pot +++ b/source/installguide/locale/pot/hypervisor/vsphere.pot @@ -78,7 +78,7 @@ msgstr "" #: ../../hypervisor/vsphere.rst:66 # 0a5dabc432dc454396baf7e656897fe6 -msgid "All hosts within a cluster must be homogenous. That means the CPUs must be of the same type, count, and feature flags." +msgid "All hosts within a cluster must be homogeneous. That means the CPUs must be of the same type, count, and feature flags." msgstr "" #: ../../hypervisor/vsphere.rst:69 @@ -163,7 +163,7 @@ msgstr "" #: ../../hypervisor/vsphere.rst:114 # 311e91fb341e4d30a649ba13d80e2e72 -msgid "You must re-install VMware ESXi if you are going to re-use a host from a previous install." +msgid "You must re-install VMware ESXi if you are going to reuse a host from a previous install." msgstr "" #: ../../hypervisor/vsphere.rst:117 @@ -830,12 +830,12 @@ msgstr "" #: ../../hypervisor/vsphere.rst:507 # c42b39990d6e4d0589467d0a4ff5be4c -msgid "**Management IP Address** This is the IP address of the VSM appliance. This is the IP address you specify in the virtual switch IP Address field while configuting Nexus virtual switch." 
+msgid "**Management IP Address** This is the IP address of the VSM appliance. This is the IP address you specify in the virtual switch IP Address field while configuring Nexus virtual switch." msgstr "" #: ../../hypervisor/vsphere.rst:512 # 26a1722f2c004284958a7d40035272d8 -msgid "**SSL** Should be set to Enable.Always enable SSL. SSH is usually enabled by default during the VSM installation. However, check whether the SSH connection to the VSM is working, without which CloudStack failes to connect to the VSM." +msgid "**SSL** Should be set to Enable.Always enable SSL. SSH is usually enabled by default during the VSM installation. However, check whether the SSH connection to the VSM is working, without which CloudStack fails to connect to the VSM." msgstr "" #: ../../hypervisor/vsphere.rst:519 diff --git a/source/installguide/locale/pot/hypervisor/xenserver.pot b/source/installguide/locale/pot/hypervisor/xenserver.pot index 7c7a65bdc6..73e52725c8 100644 --- a/source/installguide/locale/pot/hypervisor/xenserver.pot +++ b/source/installguide/locale/pot/hypervisor/xenserver.pot @@ -63,7 +63,7 @@ msgstr "" #: ../../hypervisor/xenserver.rst:40 # 5dee06ed5f2f472f9e0979ddbb241a63 -msgid "You must re-install Citrix XenServer if you are going to re-use a host from a previous install." +msgid "You must re-install Citrix XenServer if you are going to reuse a host from a previous install." 
msgstr "" #: ../../hypervisor/xenserver.rst:43 @@ -253,7 +253,7 @@ msgstr "" #: ../../hypervisor/xenserver.rst:193 # c7d858231a3142109c09bfcc3340078b -msgid "`http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" +msgid "`http://download.cloudstack.org/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" msgstr "" #: ../../hypervisor/xenserver.rst:196 @@ -263,7 +263,7 @@ msgstr "" #: ../../hypervisor/xenserver.rst:198 # beac106ea20e420cae0ca5acb450646b -msgid "`http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz `_" +msgid "`http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz `_" msgstr "" #: ../../hypervisor/xenserver.rst:201 @@ -273,7 +273,7 @@ msgstr "" #: ../../hypervisor/xenserver.rst:203 # 998fd323b14c42149cc6633f4df39b83 -msgid "`http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz `_" +msgid "`http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz `_" msgstr "" #: ../../hypervisor/xenserver.rst:207 diff --git a/source/installguide/locale/pot/hypervisor_installation.pot b/source/installguide/locale/pot/hypervisor_installation.pot index def2f8a3a7..2dfbdfa1aa 100644 --- a/source/installguide/locale/pot/hypervisor_installation.pot +++ b/source/installguide/locale/pot/hypervisor_installation.pot @@ -104,7 +104,7 @@ msgstr "" #: ../../hypervisor_installation.rst:3366 # 86c9ea55ffec4f399b8e7918b7bb43d0 # e392215de57d4843958e657b3d6d64c7 -msgid "All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags." +msgid "All hosts within a cluster must be homogeneous. The CPUs must be of the same type, count, and feature flags." msgstr "" #: ../../hypervisor_installation.rst:75 @@ -423,7 +423,7 @@ msgstr "" #: ../../hypervisor_installation.rst:3539 # ff42e0ea9cf64b509d310c0071086352 # 5b89b062abbe4cc89b7fd10ece8eb9e8 -msgid "In order to have live migration working libvirt has to listen for unsecured TCP connections. 
We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in ``/etc/libvirt/libvirtd.conf``" +msgid "In order to have live migration working libvirt has to listen for insecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in ``/etc/libvirt/libvirtd.conf``" msgstr "" #: ../../hypervisor_installation.rst:308 @@ -968,7 +968,7 @@ msgstr "" #: ../../hypervisor_installation.rst:3983 # d68286a4499a42d0af59d58cf3307ce8 # cabb0609f32c4af983aec749c9a3bde6 -msgid "These iptable settings are not persistent accross reboots, we have to save them first." +msgid "These iptable settings are not persistent across reboots, we have to save them first." msgstr "" #: ../../hypervisor_installation.rst:911 @@ -1239,7 +1239,7 @@ msgstr "" #: ../../hypervisor_installation.rst:1022 # 7f67c993b17a4b25a8b436331132a38b -msgid "You must re-install Citrix XenServer if you are going to re-use a host from a previous install." +msgid "You must re-install Citrix XenServer if you are going to reuse a host from a previous install." 
msgstr "" #: ../../hypervisor_installation.rst:1031 @@ -1385,7 +1385,7 @@ msgstr "" #: ../../hypervisor_installation.rst:1181 # 54880e3acb4945b4ae3d3fb251252130 -msgid "`http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" +msgid "`http://download.cloudstack.org/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" msgstr "" #: ../../hypervisor_installation.rst:1183 @@ -1395,7 +1395,7 @@ msgstr "" #: ../../hypervisor_installation.rst:1185 # b1533b01dec04d8ea37cb07d541d8013 -msgid "`http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz `_" +msgid "`http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz `_" msgstr "" #: ../../hypervisor_installation.rst:1187 @@ -1405,7 +1405,7 @@ msgstr "" #: ../../hypervisor_installation.rst:1189 # bc80c4fa363248c292882b511d90808a -msgid "`http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz `_" +msgid "`http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz `_" msgstr "" #: ../../hypervisor_installation.rst:1193 @@ -2573,7 +2573,7 @@ msgstr "" #: ../../hypervisor_installation.rst:2258 # 444698b909b14c1bafa79d28ca6d0e0c -msgid "All hosts within a cluster must be homogenous. That means the CPUs must be of the same type, count, and feature flags." +msgid "All hosts within a cluster must be homogeneous. That means the CPUs must be of the same type, count, and feature flags." msgstr "" #: ../../hypervisor_installation.rst:2286 @@ -2628,7 +2628,7 @@ msgstr "" #: ../../hypervisor_installation.rst:2331 # fa50c73129ea428e839debdffdc10099 -msgid "You must re-install VMware ESXi if you are going to re-use a host from a previous install." +msgid "You must re-install VMware ESXi if you are going to reuse a host from a previous install." msgstr "" #: ../../hypervisor_installation.rst:2336 @@ -3310,7 +3310,7 @@ msgstr "" #: ../../hypervisor_installation.rst:2754 # 7febd6557b3742e9a3d478672eba6cad -msgid "This is the IP address of the VSM appliance. 
This is the IP address you specify in the virtual switch IP Address field while configuting Nexus virtual switch." +msgid "This is the IP address of the VSM appliance. This is the IP address you specify in the virtual switch IP Address field while configuring Nexus virtual switch." msgstr "" #: ../../hypervisor_installation.rst:2758 @@ -3320,7 +3320,7 @@ msgstr "" #: ../../hypervisor_installation.rst:2756 # cab1502c5d5d4221873f13e86168b3d3 -msgid "Should be set to Enable.Always enable SSL. SSH is usually enabled by default during the VSM installation. However, check whether the SSH connection to the VSM is working, without which CloudStack failes to connect to the VSM." +msgid "Should be set to Enable.Always enable SSL. SSH is usually enabled by default during the VSM installation. However, check whether the SSH connection to the VSM is working, without which CloudStack fails to connect to the VSM." msgstr "" #: ../../hypervisor_installation.rst:2761 @@ -4196,7 +4196,7 @@ msgstr "" #: ../../hypervisor_installation.rst:3497 # 6e8937e78ff6442db35604533d058b4a -msgid "Next step is to update the Agent configuration setttings. The settings are in ``/etc/cloudstack/agent/agent.properties``" +msgid "Next step is to update the Agent configuration settings. The settings are in ``/etc/cloudstack/agent/agent.properties``" msgstr "" #: ../../hypervisor_installation.rst:3502 diff --git a/source/installguide/locale/pot/installation.pot b/source/installguide/locale/pot/installation.pot index e30f6448e1..a3ffcb6efe 100644 --- a/source/installguide/locale/pot/installation.pot +++ b/source/installguide/locale/pot/installation.pot @@ -495,7 +495,7 @@ msgstr "" #: ../../installation.rst:358 # d212ae279e594ea4a9b82dde3342f509 -msgid "Before setting up the Management Server, download vhd-util from `vhd-util `_." +msgid "Before setting up the Management Server, download vhd-util from `vhd-util `_." 
msgstr "" #: ../../installation.rst:361 @@ -1079,7 +1079,7 @@ msgstr "" #: ../../installation.rst:1085 # 60bd480d13124c16b4c8774b8abd5439 -msgid "Download vhd-util from `vhd-util `_" +msgid "Download vhd-util from `vhd-util `_" msgstr "" #: ../../installation.rst:1088 diff --git a/source/installguide/locale/pot/managing_networks.pot b/source/installguide/locale/pot/managing_networks.pot index 35b07554ad..821c40dc15 100644 --- a/source/installguide/locale/pot/managing_networks.pot +++ b/source/installguide/locale/pot/managing_networks.pot @@ -964,7 +964,7 @@ msgstr "" #: ../../managing_networks.rst:638 # 191010bd30424f3aa12cf309a48dcfc4 -msgid "You cannot apply IP Reservation if any VM is alloted with an IP address that is outside the Guest VM CIDR." +msgid "You cannot apply IP Reservation if any VM is allotted with an IP address that is outside the Guest VM CIDR." msgstr "" #: ../../managing_networks.rst:643 @@ -5090,7 +5090,7 @@ msgstr "" #: ../../managing_networks.rst:4657 # 37506050acaf40fda04742118858f794 -msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly alloted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." +msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. A guest VLAN is randomly allotted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." 
msgstr "" #: ../../managing_networks.rst:4662 diff --git a/source/installguide/locale/pot/qig.pot b/source/installguide/locale/pot/qig.pot index d38faa0b42..8b94d471bd 100644 --- a/source/installguide/locale/pot/qig.pot +++ b/source/installguide/locale/pot/qig.pot @@ -442,12 +442,12 @@ msgstr "" #: ../../qig.rst:498 # 99f2fbc9c2454f3487ad43f84bb08a12 -msgid "In order to have live migration working libvirt has to listen for unsecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in /etc/libvirt/libvirtd.conf" +msgid "In order to have live migration working libvirt has to listen for insecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in /etc/libvirt/libvirtd.conf" msgstr "" #: ../../qig.rst:502 # 9fc492fb10044ee0844b1d25e91f50ee -msgid "Set the following paramaters:" +msgid "Set the following parameters:" msgstr "" #: ../../qig.rst:512 diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/building_from_source.po b/source/installguide/locale/zh_CN/LC_MESSAGES/building_from_source.po index ce68b1a7ff..f047dcc9b7 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/building_from_source.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/building_from_source.po @@ -296,7 +296,7 @@ msgid "" "While we have defined, and you have presumably already installed the " "bootstrap prerequisites, there are a number of build time prerequisites that" " need to be resolved. CloudStack uses maven for dependency resolution. 
You " -"can resolve the buildtime depdencies for CloudStack by running:" +"can resolve the buildtime dependencies for CloudStack by running:" msgstr "虽然我们做了一些定义,并且可能已经安装了引导的前提条件,但仍有一些在编译时需要解决的先决条件。CloudStack使用Maven进行依赖性解析。您可以通过运行以下命令,来解决编译CloudStack时的依赖性:" # 7e08137b290649cda9b8e9b728ff33aa @@ -561,10 +561,10 @@ msgstr "由于这些模块需要的依赖项不能和CloudStack一起发行, #: ../../building_from_source.rst:446 msgid "" "You may also need to download `vhd-util " -"`_, which was " +"`_, which was " "removed due to licensing issues. You'll copy vhd-util to the " "``scripts/vm/hypervisor/xenserver/`` directory." -msgstr "你可能还需要下载`vhd-util `_, 也是由于授权问题而被移除。 复制vhd-util到该目录: ``scripts/vm/hypervisor/xenserver/``." +msgstr "你可能还需要下载`vhd-util `_, 也是由于授权问题而被移除。 复制vhd-util到该目录: ``scripts/vm/hypervisor/xenserver/``." # bfbd7215ff0345f183a7946f620b1368 #: ../../building_from_source.rst:451 diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/configuration.po b/source/installguide/locale/zh_CN/LC_MESSAGES/configuration.po index eeba00e931..c9d8030831 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/configuration.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/configuration.po @@ -2161,7 +2161,7 @@ msgstr "" # febca3ed36dc41f0bfe48a3170c1b282 #: ../../configuration.rst:1264 msgid "" -"The createStoragePool API has been augmented to support plugable storage " +"The createStoragePool API has been augmented to support pluggable storage " "providers. 
The following is a list of parameters to use when adding storage " "to CloudStack that is based on the SolidFire plug-in:" msgstr "创建存储池的API已经被扩展到支持插件式存储供应商。下面给出了当向基于SolidFire插件的CloudStack添加存储时可使用的参数列表。" diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/kvm.po b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/kvm.po index e9cf577e45..abbaebaf6d 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/kvm.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/kvm.po @@ -95,7 +95,7 @@ msgstr "同一集群中主机必须使用相同版本的Linux系统。" # fc001eaf8fb842d7adbbf5bb977be8fd #: ../../hypervisor/kvm.rst:52 msgid "" -"All hosts within a cluster must be homogenous. The CPUs must be of the same " +"All hosts within a cluster must be homogeneous. The CPUs must be of the same " "type, count, and feature flags." msgstr "同一群集中的所有节点架构必须一致。CPU的型号、数量和功能参数必须相同。" @@ -383,7 +383,7 @@ msgstr "CloudStack使用libvirt管理虚拟机。因此正确地配置libvirt至 # ed8dd766459147cbb85ff50d90c4b80e #: ../../hypervisor/kvm.rst:243 msgid "" -"In order to have live migration working libvirt has to listen for unsecured " +"In order to have live migration working libvirt has to listen for insecured " "TCP connections. We also need to turn off libvirts attempt to use Multicast " "DNS advertising. Both of these settings are in " "``/etc/libvirt/libvirtd.conf``" @@ -862,7 +862,7 @@ msgstr "RHEL 及 CentOS使用iptables作为防火墙,执行以下iptables命 # f47cb1aac5b0422ea1409fd01b64e2db #: ../../hypervisor/kvm.rst:836 msgid "" -"These iptable settings are not persistent accross reboots, we have to save " +"These iptable settings are not persistent across reboots, we have to save " "them first." 
msgstr "这些iptables配置并不会持久保存,重启之后将会消失,我们必须手动保存这些配置。" diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/vsphere.po b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/vsphere.po index dd7d536ba0..314e650167 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/vsphere.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/vsphere.po @@ -101,7 +101,7 @@ msgstr "所有主机必须为64位架构并且支持HVM(启用Intel-VT或AMD-V) # 0a5dabc432dc454396baf7e656897fe6 #: ../../hypervisor/vsphere.rst:66 msgid "" -"All hosts within a cluster must be homogenous. That means the CPUs must be " +"All hosts within a cluster must be homogeneous. That means the CPUs must be " "of the same type, count, and feature flags." msgstr "同一群集中的所有节点必须为同一架构。CPU型号、数量和功能参数必须相同。" @@ -203,7 +203,7 @@ msgstr "必须配置vCenter使用443端口与CloudStack管理服务器通讯。" # 311e91fb341e4d30a649ba13d80e2e72 #: ../../hypervisor/vsphere.rst:114 msgid "" -"You must re-install VMware ESXi if you are going to re-use a host from a " +"You must re-install VMware ESXi if you are going to reuse a host from a " "previous install." msgstr "如果你计划利用之前安装的主机,那么必须重新安装VMware ESXi。" @@ -1042,7 +1042,7 @@ msgstr "" msgid "" "**Management IP Address** This is the IP address of the VSM appliance. This " "is the IP address you specify in the virtual switch IP Address field while " -"configuting Nexus virtual switch." +"configuring Nexus virtual switch." msgstr "" # 26a1722f2c004284958a7d40035272d8 @@ -1050,7 +1050,7 @@ msgstr "" msgid "" "**SSL** Should be set to Enable.Always enable SSL. SSH is usually enabled by" " default during the VSM installation. However, check whether the SSH " -"connection to the VSM is working, without which CloudStack failes to connect" +"connection to the VSM is working, without which CloudStack fails to connect" " to the VSM." 
msgstr "" diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/xenserver.po b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/xenserver.po index d87ecf4f31..fab9ab1dfd 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/xenserver.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor/xenserver.po @@ -73,7 +73,7 @@ msgstr "XenServer 6.2.0" # 5dee06ed5f2f472f9e0979ddbb241a63 #: ../../hypervisor/xenserver.rst:40 msgid "" -"You must re-install Citrix XenServer if you are going to re-use a host from " +"You must re-install Citrix XenServer if you are going to reuse a host from " "a previous install." msgstr "如果你想使用以前装的某台主机,你必须重新安装Citrix XenServer." @@ -302,10 +302,10 @@ msgstr "适用于XenServer 6.0.2:" # c7d858231a3142109c09bfcc3340078b #: ../../hypervisor/xenserver.rst:193 msgid "" -"`http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz " -"`_" -msgstr "`http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" +msgstr "`http://download.cloudstack.org/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" # 2cbbc4bcdcdd42d0816a6b2df748a0d6 #: ../../hypervisor/xenserver.rst:196 @@ -315,9 +315,9 @@ msgstr "适用于XenServer 5.6 SP2:" # beac106ea20e420cae0ca5acb450646b #: ../../hypervisor/xenserver.rst:198 msgid "" -"`http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz " -"`_" -msgstr "`http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz `_" +"`http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz " +"`_" +msgstr "`http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz `_" # 62cd9713906c45dc96254605c7134b11 #: ../../hypervisor/xenserver.rst:201 @@ -327,9 +327,9 @@ msgstr "适用于XenServer 6.0:" # 998fd323b14c42149cc6633f4df39b83 #: ../../hypervisor/xenserver.rst:203 msgid "" -"`http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz " -"`_" -msgstr "`http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz `_" 
+"`http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz " +"`_" +msgstr "`http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz `_" # 50f6122c6c5e42dbb34f5942e788bd76 #: ../../hypervisor/xenserver.rst:207 diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor_installation.po b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor_installation.po index c31f471110..19d7b91ffe 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor_installation.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/hypervisor_installation.po @@ -108,7 +108,7 @@ msgstr "同一集群中主机必须使用相同版本的Linux系统。" # e392215de57d4843958e657b3d6d64c7 #: ../../hypervisor_installation.rst:70 ../../hypervisor_installation.rst:3366 msgid "" -"All hosts within a cluster must be homogenous. The CPUs must be of the same " +"All hosts within a cluster must be homogeneous. The CPUs must be of the same " "type, count, and feature flags." msgstr "同一群集中的所有节点架构必须一致。CPU的型号、数量和功能参数必须相同。" @@ -473,7 +473,7 @@ msgstr "CloudStack使用libvirt管理虚拟机。因此正确地配置libvirt至 #: ../../hypervisor_installation.rst:303 #: ../../hypervisor_installation.rst:3539 msgid "" -"In order to have live migration working libvirt has to listen for unsecured " +"In order to have live migration working libvirt has to listen for insecured " "TCP connections. We also need to turn off libvirts attempt to use Multicast " "DNS advertising. Both of these settings are in " "``/etc/libvirt/libvirtd.conf``" @@ -1082,7 +1082,7 @@ msgstr "RHEL 及 CentOS使用iptables作为防火墙,执行以下iptables命 #: ../../hypervisor_installation.rst:903 #: ../../hypervisor_installation.rst:3983 msgid "" -"These iptable settings are not persistent accross reboots, we have to save " +"These iptable settings are not persistent across reboots, we have to save " "them first." 
msgstr "这些iptables配置并不会持久保存,重启之后将会消失,我们必须手动保存这些配置。" @@ -1382,7 +1382,7 @@ msgstr "XenServer 6.2.0" # 7f67c993b17a4b25a8b436331132a38b #: ../../hypervisor_installation.rst:1022 msgid "" -"You must re-install Citrix XenServer if you are going to re-use a host from " +"You must re-install Citrix XenServer if you are going to reuse a host from " "a previous install." msgstr "如果你想使用以前装的某台主机,你必须重新安装Citrix XenServer." @@ -1565,10 +1565,10 @@ msgstr "适用于XenServer 6.0.2:" # 54880e3acb4945b4ae3d3fb251252130 #: ../../hypervisor_installation.rst:1181 msgid "" -"`http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz " -"`_" -msgstr "`http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" +msgstr "`http://download.cloudstack.org/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz `_" # 4405c659cea34cb2895c0f43693473f1 #: ../../hypervisor_installation.rst:1183 @@ -1578,9 +1578,9 @@ msgstr "适用于XenServer 5.6 SP2:" # b1533b01dec04d8ea37cb07d541d8013 #: ../../hypervisor_installation.rst:1185 msgid "" -"`http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz " -"`_" -msgstr "`http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz `_" +"`http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz " +"`_" +msgstr "`http://download.cloudstack.org/releases/2.2.0/xenserver-cloud-supp.tgz `_" # 994df5c8945149b7b8663a2e603fc847 #: ../../hypervisor_installation.rst:1187 @@ -1590,9 +1590,9 @@ msgstr "适用于XenServer 6.0:" # bc80c4fa363248c292882b511d90808a #: ../../hypervisor_installation.rst:1189 msgid "" -"`http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz " -"`_" -msgstr "`http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz `_" +"`http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz " +"`_" +msgstr "`http://download.cloudstack.org/releases/3.0/xenserver-cloud-supp.tgz `_" # 09db532336c943caa7e4713f7f57deaa #: ../../hypervisor_installation.rst:1193 @@ -3112,7 +3112,7 @@ msgstr 
"所有主机必须为64位架构并且支持HVM(启用Intel-VT或AMD-V) # 444698b909b14c1bafa79d28ca6d0e0c #: ../../hypervisor_installation.rst:2258 msgid "" -"All hosts within a cluster must be homogenous. That means the CPUs must be " +"All hosts within a cluster must be homogeneous. That means the CPUs must be " "of the same type, count, and feature flags." msgstr "同一群集中的所有节点必须为同一架构。CPU型号、数量和功能参数必须相同。" @@ -3184,7 +3184,7 @@ msgstr "必须配置vCenter使用443端口与CloudStack管理服务器通讯。" # fa50c73129ea428e839debdffdc10099 #: ../../hypervisor_installation.rst:2331 msgid "" -"You must re-install VMware ESXi if you are going to re-use a host from a " +"You must re-install VMware ESXi if you are going to reuse a host from a " "previous install." msgstr "如果你计划利用之前安装的主机,那么必须重新安装VMware ESXi。" @@ -4037,7 +4037,7 @@ msgstr "**管理 IP**" #: ../../hypervisor_installation.rst:2754 msgid "" "This is the IP address of the VSM appliance. This is the IP address you " -"specify in the virtual switch IP Address field while configuting Nexus " +"specify in the virtual switch IP Address field while configuring Nexus " "virtual switch." msgstr "VSM appliance的IP地址。 当配置Nexus虚拟交换机时在虚拟交换机的IP地址区域输入的IP地址。" @@ -4051,7 +4051,7 @@ msgstr "**SSL**" msgid "" "Should be set to Enable.Always enable SSL. SSH is usually enabled by default" " during the VSM installation. However, check whether the SSH connection to " -"the VSM is working, without which CloudStack failes to connect to the VSM." +"the VSM is working, without which CloudStack fails to connect to the VSM." msgstr "应该设置为启用。总是启用SSL。在VSM安装期间通常会启用SSH功能。尽管如此仍需检查是否能够使用SSH连接到VSM,如果不能无法连接,CloudStack到VSM的连接会失败。" # 747a5560bbed48598ec8fb9ad2739dde @@ -5144,7 +5144,7 @@ msgstr "CloudStack使用代理管理LXC实例。管理服务器与代理通信 # 6e8937e78ff6442db35604533d058b4a #: ../../hypervisor_installation.rst:3497 msgid "" -"Next step is to update the Agent configuration setttings. The settings are " +"Next step is to update the Agent configuration settings. 
The settings are " "in ``/etc/cloudstack/agent/agent.properties``" msgstr "接下来更新代理配置。在 ``/etc/cloudstack/agent/agent.properties`` 中配置" diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/installation.po b/source/installguide/locale/zh_CN/LC_MESSAGES/installation.po index 69875ad842..73debd3f29 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/installation.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/installation.po @@ -591,8 +591,8 @@ msgstr "这个步骤仅适用于安装了XenServer的hypervisor主机。" #: ../../installation.rst:358 msgid "" "Before setting up the Management Server, download vhd-util from `vhd-util " -"`_." -msgstr "在设置管理服务器前,下载 `vhd-util`_." +"`_." +msgstr "在设置管理服务器前,下载 `vhd-util`_." # d4c4d9a792ec46bbacc984b02fa8dd22 #: ../../installation.rst:361 @@ -1409,8 +1409,8 @@ msgstr "这个步骤仅适用于安装了XenServer的hypervisor主机。" #: ../../installation.rst:1085 msgid "" "Download vhd-util from `vhd-util " -"`_" -msgstr "下载 `vhd-util `_" +"`_" +msgstr "下载 `vhd-util `_" # 3b0ff1adaf804ec4b60b43307747e306 #: ../../installation.rst:1088 diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/managing_networks.po b/source/installguide/locale/zh_CN/LC_MESSAGES/managing_networks.po index 6bd38dd619..c32580f711 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/managing_networks.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/managing_networks.po @@ -1044,7 +1044,7 @@ msgstr "指定一个有效的客户虚拟机CIDR。只有不活动的IP在客户 # 191010bd30424f3aa12cf309a48dcfc4 #: ../../managing_networks.rst:638 msgid "" -"You cannot apply IP Reservation if any VM is alloted with an IP address that" +"You cannot apply IP Reservation if any VM is allotted with an IP address that" " is outside the Guest VM CIDR." msgstr "如果任一虚拟机被分配了客户虚拟机CIDR之外的IP地址时,IP预留将不能应用。" @@ -6365,7 +6365,7 @@ msgstr "主要的优势为:" #: ../../managing_networks.rst:4657 msgid "" "The administrator can deploy a set of VLANs and allow users to deploy VMs on" -" these VLANs. 
A guest VLAN is randomly alloted to an account from a pre-" +" these VLANs. A guest VLAN is randomly allotted to an account from a pre-" "specified set of guest VLANs. All the VMs of a certain tier of an account " "reside on the guest VLAN allotted to that account." msgstr "管理可以部署一个vlans集,同时运行用户部署虚拟机在这些vlan上。从预先指定的vlan集中随机的为租户分配一个来宾vlan.租户处于同一层的所有vm处于分配给这个租户的来宾vlan." diff --git a/source/installguide/locale/zh_CN/LC_MESSAGES/qig.po b/source/installguide/locale/zh_CN/LC_MESSAGES/qig.po index 5289ea229d..74d85d4c20 100644 --- a/source/installguide/locale/zh_CN/LC_MESSAGES/qig.po +++ b/source/installguide/locale/zh_CN/LC_MESSAGES/qig.po @@ -588,14 +588,14 @@ msgstr "CloudStack使用libvirt管理虚拟机。因此正确的配置libvirt至 # 99f2fbc9c2454f3487ad43f84bb08a12 #: ../../qig.rst:498 msgid "" -"In order to have live migration working libvirt has to listen for unsecured " +"In order to have live migration working libvirt has to listen for insecured " "TCP connections. We also need to turn off libvirts attempt to use Multicast " "DNS advertising. Both of these settings are in /etc/libvirt/libvirtd.conf" msgstr "为了实现动态迁移,libvirt需要监听使用非加密的TCP连接。还需要关闭libvirts尝试使用组播DNS进行广播。这些都是在 /etc/libvirt/libvirtd.conf文件中进行配置。" # 9fc492fb10044ee0844b1d25e91f50ee #: ../../qig.rst:502 -msgid "Set the following paramaters:" +msgid "Set the following parameters:" msgstr "设置下列参数:" # 95403c520e0647c88d0026cb30086615 diff --git a/source/installguide/management-server/_database.rst b/source/installguide/management-server/_database.rst index 89e2f8c244..ca70a13e8c 100644 --- a/source/installguide/management-server/_database.rst +++ b/source/installguide/management-server/_database.rst @@ -166,6 +166,10 @@ MySQL. See :ref:`install-database-on-separate-node`. setenforce permissive +.. note:: In a production environment, selinux should be set to enforcing + and the necessary selinux policies are created to allow the + services to run. + #. Set up the database. 
The cloudstack-setup-databases script is used for creating the cloudstack @@ -430,7 +434,7 @@ The following command creates the cloud user on the database. want to expose your root credentials but still want the database to be prepared for first start up. These skipped steps will have had to be done manually prior to executing this script. This behaviour can be - envoked by passing the --schema-only flag. This flag conflicts with the + invoked by passing the --schema-only flag. This flag conflicts with the --deploy-as flag so the two cannot be used together. To set up the databases and user manually before executing the script with the flag, these commands can be executed: diff --git a/source/installguide/management-server/_nfs.rst b/source/installguide/management-server/_nfs.rst index 4ec71caa14..f3ca1d899a 100644 --- a/source/installguide/management-server/_nfs.rst +++ b/source/installguide/management-server/_nfs.rst @@ -54,7 +54,7 @@ from the Management Server. The exact commands for the following steps may vary depending on your operating system version. -The following steps asume you already have an NFS Server installed on your storage +The following steps assume you already have an NFS Server installed on your storage system. Please refer to the guide of your OS on how to install a NFS Server. .. warning:: @@ -253,8 +253,8 @@ operating system version. .. parsed-literal:: mkdir /primary - mount -t nfs :/export/primary + mount -t nfs :/export/primary /primary umount /primary mkdir /secondary - mount -t nfs :/export/secondary + mount -t nfs :/export/secondary /secondary umount /secondary diff --git a/source/installguide/management-server/_prerequisite.rst b/source/installguide/management-server/_prerequisite.rst index 648f1670d8..e932c2c603 100644 --- a/source/installguide/management-server/_prerequisite.rst +++ b/source/installguide/management-server/_prerequisite.rst @@ -103,7 +103,7 @@ node. will be installed. .. 
warning:: - CloudStack |version| requires Java 11 JRE. Installing CloudStack packages will - automatically install Java 11, but it's good to explicitly confirm that the Java 11 + CloudStack |version| requires Java 17 JRE. Installing CloudStack packages will + automatically install Java 17, but it's good to explicitly confirm that the Java 17 is the selected/active one (in case you had a previous Java version already installed) with ``alternatives --config java`` after CloudStack packages are already installed. diff --git a/source/installguide/overview/_requirements.rst b/source/installguide/overview/_requirements.rst index c46f90ed69..7d42d6f70d 100644 --- a/source/installguide/overview/_requirements.rst +++ b/source/installguide/overview/_requirements.rst @@ -29,7 +29,7 @@ Management Server may be placed on an Instance. - Operating system: - - Preferred: CentOS/RHEL 7.2+ or Ubuntu 16.04(.2) or higher + - Preferred: EL8+ or Ubuntu 22.04 or higher - 64-bit x86 CPU (more cores results in better performance) diff --git a/source/plugins/cloudian-connector.rst b/source/plugins/cloudian-connector.rst index addd869b3b..bfe1db3571 100644 --- a/source/plugins/cloudian-connector.rst +++ b/source/plugins/cloudian-connector.rst @@ -29,7 +29,7 @@ their own S3 storage areas. Compatibility ~~~~~~~~~~~~~ -The following table shows the compatiblity of Cloudian Connector with CloudStack. +The following table shows the compatibility of Cloudian Connector with CloudStack. .. cssclass:: table-striped table-bordered table-hover @@ -156,7 +156,15 @@ Cloudian ships with SSO disabled by default. You will need to enable it on each CMC server. Additionally, you will need to choose a unique SSO shared key that you will also configure in the CloudStack connector further below. 
-Edit Puppet config to enable SSO on all CMC servers: +HyperStore 8+ instructions to enable SSO on all CMC servers: + + :: + + # hsctl config set cmc.sso.enabled=true + # hsctl config set cmc.sso.sharedKey=YourSecretKeyHere + # hsctl config apply cmc + +Older HyperStore versions use Puppet. Edit Puppet config to enable SSO on all CMC servers: :: @@ -229,8 +237,7 @@ settings. To enable the connector, ensure that the global setting "cloudian.connector.enabled" is set to true. Finally, restart each of the management server(s) to reload and enable the connector. -For example, here is how you can restart the CloudStack management server -installed on CentOS7: +For example, here is how you can restart the CloudStack management server: :: diff --git a/source/plugins/cloudstack-kubernetes-provider.rst b/source/plugins/cloudstack-kubernetes-provider.rst index f093dee6a1..595c4fd51d 100644 --- a/source/plugins/cloudstack-kubernetes-provider.rst +++ b/source/plugins/cloudstack-kubernetes-provider.rst @@ -34,7 +34,7 @@ The Prebuilt containers are available on `Docker Hub 443/TCP 5d1h + nginx-deployment2 NodePort 10.103.111.85 80:32014/TCP 4s + + 2. Navigate to network and acquire a public IP. + + |cks-acquire-publicip.png| + + 3. Add a firewall rule on port 80 on the public IP address + + |cks-addfirewall.png| + + 4. Add a loadbalancer rule mentioning the private node port and add the corresponding kubernetes worker node. + + |cks-addloadbalancer.png| + + |cks-addnode.png| + .. |ckp-ip.png| image:: /_static/images/ckp-ip.png .. |ckp-ip-fw.png| image:: /_static/images/ckp-ip-fw.png .. |ckp-ip-lb.png| image:: /_static/images/ckp-ip-lb.png +.. |cks-acquire-publicip.png| image:: /_static/images/cks-acquire-publicip.png +.. |cks-addfirewall.png| image:: /_static/images/cks-addfirewall.png +.. |cks-addloadbalancer.png| image:: /_static/images/cks-addloadbalancer.png +.. 
|cks-addnode.png| image:: /_static/images/cks-addnode.png \ No newline at end of file diff --git a/source/plugins/cloudstack-kubernetes-service.rst b/source/plugins/cloudstack-kubernetes-service.rst index 70f394d515..cf191eebd3 100644 --- a/source/plugins/cloudstack-kubernetes-service.rst +++ b/source/plugins/cloudstack-kubernetes-service.rst @@ -293,7 +293,7 @@ scaleKubernetesCluster API can be used to scale a running (or stopped cluster) t - **serviceofferingid** (the ID of the new service offering for the Instances in the cluster) - **size** (number of Kubernetes cluster worker nodes) -Only running Kubernetes clusters can be scaled in size. When the service fails to scale the cluster, the cluster will show in Alert state else if the scaling is successfull cluster will show up in Running state. +Only running Kubernetes clusters can be scaled in size. When the service fails to scale the cluster, the cluster will show in Alert state else if the scaling is successful cluster will show up in Running state. .. note:: - Only up scaling is supported while scaling clusters for service offering. diff --git a/source/plugins/index.rst b/source/plugins/index.rst index 661ec82c3c..16f3eaa52b 100644 --- a/source/plugins/index.rst +++ b/source/plugins/index.rst @@ -32,6 +32,7 @@ This is the Apache CloudStack Plugins guide. This section gives information for cloudian-connector nicira-plugin + nsx-plugin vxlan ovs-plugin ipv6 diff --git a/source/plugins/ipv6.rst b/source/plugins/ipv6.rst index 81fe7a5cc0..d119ee292e 100644 --- a/source/plugins/ipv6.rst +++ b/source/plugins/ipv6.rst @@ -16,14 +16,7 @@ IPv6 Support in CloudStack =========================== - -CloudStack supports Internet Protocol version 6 (IPv6), the recent -version of the Internet Protocol (IP) that defines routing the network -traffic. IPv6 uses a 128-bit address that exponentially expands the -current address space that is available to the users. 
IPv6 addresses -consist of eight groups of four hexadecimal digits separated by colons, -for example, 5001:0dt8:83a3:1012:1000:8s2e:0870:7454. CloudStack -supports IPv6 for shared and isolated networks. It also supports IPv6 for VPC Network Tiers. +CloudStack has limited IPv6 support. It supports IPv6 for shared and isolated networks. It also supports IPv6 for VPC Network Tiers. Shared network -------------- @@ -282,9 +275,9 @@ Alternatively, ``createGuestNetworkIpv6Prefix`` API can be used to add a new gue Adding Network or VPC Offering with IPv6 Support ################################################ -To create an IPv6 suported network or VPC offering, global configuration - ``ipv6.offering.enabled`` must be set to **true**. +To create an IPv6 supported network or VPC offering, global configuration - ``ipv6.offering.enabled`` must be set to **true**. -With 4.17.0, a new paramter - ``internetprotocol`` has been added to: +With 4.17.0, a new parameter - ``internetprotocol`` has been added to: - the ``createNetworkOffering`` API which can be used to create a network offering with IPv6 support by using the value dualstack. - the ``createVPCOffering`` API which can be used to create a VPC offering with IPv6 support by using the value dualstack. Corresponding option has also been provided in the UI form creating network/VPC offering: @@ -311,8 +304,8 @@ For using and managing firewall rules with an IPv6 supported isolated network, C - ``listIpv6FirewallRules`` - To list existing IPv6 firewall rules for a network. - ``createIpv6FirewallRule`` - To create a new IPv6 firewall rules for a network. -- ``updateIpv6FirewallRule`` - To update an exisitng IPv6 firewall rules for a network. -- ``deleteIpv6FirewallRule`` - To delete an exisitng IPv6 firewall rules for a network. +- ``updateIpv6FirewallRule`` - To update an existing IPv6 firewall rules for a network. +- ``deleteIpv6FirewallRule`` - To delete an existing IPv6 firewall rules for a network. 
These operations are also available using UI in the network details view of an IPv6 supported network. diff --git a/source/plugins/nsx-plugin.rst b/source/plugins/nsx-plugin.rst new file mode 100644 index 0000000000..be130782c1 --- /dev/null +++ b/source/plugins/nsx-plugin.rst @@ -0,0 +1,229 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +The VMware NSX Plugin +===================== + +Introduction +------------ + +The VMware NSX Plugin introduces VMware NSX 4 as a network service provider in CloudStack to be able to create and manage Virtual Private Clouds (VPCs) in CloudStack, being able to orchestrate the following network functionalities: + +- Routing between VPC network tiers (NSX segments) +- Access Lists (ACLs) between VPC tiers and "public" network (TCP, UDP, ICMP) both as global egress rules and “public” IP specific ingress rules.
+- ACLs between VPC network tiers (TCP, UDP, ICMP) +- Port Forwarding between “public” networks and VPC network tier +- External load balancing – between VPCs network tiers and “public” networks (runs on Edge Cluster) +- Internal load balancing – between VPC network tiers +- Password injection, UserData and SSH Keys +- External, Internal DNS +- DHCP +- Kubernetes host orchestration, supporting CKS on VPCs + +Supported Versions +------------------ + +.. cssclass:: table-striped table-bordered table-hover + ++--------------+----------------------+--------------------+ +| Hypervisor | CloudStack Version | VMware NSX Version | ++==============+======================+====================+ +| VMware | >= 4.20 | 4.1.0 | ++--------------+----------------------+--------------------+ + +Table: Supported Versions + +Configuration +------------- + +Prerequisites +~~~~~~~~~~~~~ + +The VMware NSX plugin is enabled by the 'nsx.plugin.enable' setting, false by default. It enables the NSX Plugin on CloudStack when it is set to true. The global setting is non-dynamic, that is, the management server would need to be restarted after being modified. + +Prior to creating the zone, ensure that the global setting: 'vmware.management.portgroup' is set to the correct management network for ESXi hosts. + +Zone creation +~~~~~~~~~~~~~ + +For an NSX-based zone, the administrator will have to create at least 2 physical networks, one for Public and Guest networks with **NSX** isolation method and one for Management (and / or storage networks), +which uses VLAN isolation method. 
+ +**Physical network for Public and Guest traffic:** + Isolation method: NSX + VLAN ID must be empty + vSwitch type: distributed virtual switch (dvSwitch) + vSwitch name: name of the dvSwitch to handle NSX traffic + +**Physical network for Management traffic:** + Isolation method: VLAN + VLAN ID: ID for Management traffic + vSwitch type: distributed virtual switch (dvSwitch) + vSwitch name: name of the dvSwitch to handle Management traffic. + + +An example of physical networks setup: + +.. |nsx-phy-networks.png| image:: /_static/images/nsx-phy-networks.png + :alt: Physical Networks with NSX + +The next stage of zone creation would be to link the NSX controller to the CloudStack. + +.. |nsx-provider.png| image:: /_static/images/nsx-provider.png + :alt: NSX Provider details + +The administrator then needs to setup the IP ranges for Public and NSX Public traffic. + - Public Traffic: IP range for non-NSX public traffic used by system VMs + - NSX Public Traffic: IP range to use for Public IP Addresses on NSX-based VPCs or Isolated Networks. + +.. |nsx-public-traffic.png| image:: /_static/images/nsx-public-traffic.png + :alt: NSX Traffic + +The subsequent steps of zone creation remain unchanged and once the zone is successfully created and enabled, the system VMs come up with IPs from the Public IP Range (not the NSX public IP range). 
+ +VPC creation on NSX +~~~~~~~~~~~~~~~~~~~~ + +When a VPC is created in CloudStack, the following operations occur on NSX end: + - CloudStack creates a Tier1 Gateway with the following: + + - ID and name on NSX: D-A-Z-V + - Linked Tier0 Gateway: the Tier0 Gateway provided on the NSX Controller creation in CloudStack + - For NAT mode VPCs, the following Route Advertisement settings are enabled: + - All IPSec Local Endpoints + - All NAT IPs + - All LB VIP Routes + - For ROUTED mode VPCs, the following Route Advertisement settings are enabled: + - All IPSec Local Endpoints + - All NAT IPs + - All LB VIP Routes + - All Connected Segments & Service Ports + + - The VPC Virtual Router acquires a free IP address from the NSX Public Range and sets it as the Source NAT IP of the VPC. + - A new NAT Rule is created on NSX: + - ID and Name: D-A-Z-V-NAT + - Action: SNAT + - Source IP: Any + - Destination IP: Any + - Translated IP: The Source NAT IP of the VPC (selected from the NSX Public Range) + + +VPC Tier creation on NSX +~~~~~~~~~~~~~~~~~~~~~~~~~ + +On VPC network tier creation, CloudStack creates the following NSX elements: + - A Segment is linked to the VPC Tier1 Gateway with the following: + - ID and name on NSX: D-A-Z-V-S + - Linked Tier1 Gateway: The VPC Tier1 Gateway with name: D-A-Z-V + - Linked Transport Zone: The Transport Zone provided on NSX Controller creation in CloudStack + - Subnets: The VPC network tier CIDR provided on CloudStack + + - A Group under Inventory with the following: + - ID and name on NSX: D-A-Z-V-S (same as the segment) + - Group members: The created NSX segment + +VPC network ACL creation +~~~~~~~~~~~~~~~~~~~~~~~~~ + +CloudStack allows creating ACL rules for NSX based network tiers. The supported protocols for creating NSX based ACL rules are TCP, UDP and ICMP. +Network ACLs can be assigned to any network tier in the VPC during network tier creation or an existing ACL on the network tier can be replaced.
+ +VPC tier Implementation +~~~~~~~~~~~~~~~~~~~~~~~~ + +When the first VM is created on the network tier, CloudStack creates the following NSX elements: + + - A DHCP Relay Networking Profile is created; associated to the segment: + - ID and name: D-A-Z-V-S-Relay + - Server IP address: A free IP on the network tier CIDR is selected. + + - A Distributed Firewall policy: + - ID and name: D-A-Z-V-S (same as the segment) + - Applied to the Group: D-A-Z-V-S + + - Distributed Firewall policy rules under the created policy: + - ID and name: D-A-Z-V-S-R where r_id is the 'id' column on the 'network_acl_items' table for all the rules on the selected Network ACL + - Action: Allow or Drop depending on the CloudStack ACL rule action (Allow or Deny) + - Service: + - Any: for the default 'Allow all' and 'Deny all' CloudStack ACLs + - In case there is a default service for the selected protocol and port then CloudStack uses the pre-existing one. In case it does not exist, then a new service is created, matching the protocol + + - After acquiring a new Public IP Address on a VPC, users can: + - Make the acquired IP address the Source NAT IP: This will replace the current NAT rule associated with the VPC Tier 1 Gateway, replacing the Translated IP for the new one.
+ - Enable Static NAT: a new NAT rule is created on NSX with: + - ID and name: D-A-Z-V-STATICNAT + - Action: DNAT + - Destination IP: The acquired NSX Public IP address + - Translated IP: The Guest VM IP address + + - Create Port Forwarding rules: For each CloudStack Port Forwarding rule, a new NAT rule is created on NSX, with: + - ID and name: D-A-Z-V-PF where pf_id is the 'id' column on the 'port_forwarding_rules' table, for the created rule + - Gateway: The VPC Tier 1 Gateway (with name D-A-Z-V) + - Action: DNAT + - Source IP: Any + - Destination IP: The acquired NSX Public IP address + - Destination Port: The start-end port range + - Translated IP: The guest IP of the VM + - Translated Port: The start-end port range + + - Create Load Balancing rules: There will be one load balancer created per VPC if load balancer rules are created for a specific VPC. For every subsequent load balancer rule created, additional virtual servers and server pools are added to the load balancer: + - ID and name: D-A-Z-V-LB where lb_id is the 'id' column on the 'load_balancing_rules' table, for the created rule + - Attachment: Tier 1 Gateway with ID and name: D-A-Z-V + - Virtual Server: a new Virtual Server is created, with: + - ID and name: D-A-Z-V-LB-VS + - IP address: The acquired NSX Public IP address + - Port: The public port + - Type: TCP or UDP depending on the selected protocol + - Server Pool: a new Server Pool is created, with: + - ID and name: D-A-Z-V-LB-SP + - Algorithm: Supported values: Round-robin, least connection + - Members: All the selected VMs are added as server pool members, with: + - ID and name: D-A-Z-V-VM + - IP address: The VM Guest IP address + - Port: The private port + - Active Monitor: a new Active Monitor is created, with: + - ID and name: D-A-Z-V-LB-SP---AM, where PROTO is the selected Protocol, and PORT is the selected Private Port + - Passive Monitor: default passive monitor + +.. 
note:: + + The following notations were used in the above section: + + - d_id: the 'id' column on the 'domain' table for the caller domain + - a_id: the 'id' column of the 'accounts' table for the owner account + - z_id: the 'id' column of the 'datacenter' table for the zone + - v_id: the 'id' column of the 'vpcs' table for the new VPC being created + - s_id: the 'id' column of the 'networks' table for the network tier being created + + +CKS on NSX +~~~~~~~~~~~ + +To enable CKS clusters on NSX networks respective default network offerings have been created for isolated and VPC tiers. + +**DefaultNSXNetworkOfferingforKubernetesService** - is the default pre-created NSX-based network offering for enabling deployment of CKS clusters on isolated networks. +**DefaultNSXVPCNetworkOfferingforKubernetesService** - is the default pre-created NSX-based network offering to enable CKS cluster deployment on VPC tiers. + + +When deploying CKS clusters, it is possible to either select a pre-existing network or allow CloudStack create a new network for the cluster during the deployment. If one chooses the latter means of cluster deployment on a NSX-based environment, it would be needed that the 'cloud.kubernetes.cluster.network.offering' global setting be updated to point to either the default offerings or the appropriate NSX-based offering created. + +All the network resources required by the CKS cluster such as load balancer, firewall rules, port forwarding rules, etc., will be created on and provided by NSX. + +Additional Notes +~~~~~~~~~~~~~~~~~ + +- Ports 67-68 need to be manually opened for network tiers of VPCs created in NSX based zones with default_deny ACL for DHCP to work as expected. +- When creating routed VPC networks in NSX-enabled zones, ensure that no 2 VPCs use the same CIDR, to prevent IP conflicts upstream (BGP). 
diff --git a/source/plugins/vxlan.rst b/source/plugins/vxlan.rst index 69556abff8..a4726426d9 100644 --- a/source/plugins/vxlan.rst +++ b/source/plugins/vxlan.rst @@ -17,68 +17,50 @@ The VXLAN Plugin ================ -System Requirements for VXLAN ------------------------------ +General +------- +CloudStack supports VXLAN technology to enhance scalability and flexibility in networking designs. -In CloudStack 4.X.0, this plugin only supports the KVM hypervisor with the -standard linux bridge. +Using VXLAN (Virtual Extensible LAN) instead of traditional VLAN (Virtual LAN) for layer 2 isolation method offers several key benefits, especially for modern data centers and cloud networking environments that require high scalability and flexibility. -The following table lists the requirements for the hypervisor. +VXLAN overcomes the limitations of traditional VLANs by providing a highly scalable, flexible, and efficient networking solution. It enables the creation of a large number of isolated virtual networks over a common physical infrastructure, +supports better utilization of network resources through Layer 3 routing capabilities, and simplifies network management and provisioning. -.. cssclass:: table-striped table-bordered table-hover +When deploying a VXLAN-based network, there are two options to choose from: -+----------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ -| Item | Requirement | Note | -+================+===============================================+================================================================================================================+ -| Hypervisor | KVM | OvsVifDriver is not supported by this plugin in CloudStack 4.X, use BridgeVifDriver (default). 
| -+----------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ -| Linux kernel | version >= 3.7, VXLAN kernel module enabled | It is recommended to use kernel >=3.9, since Linux kernel categorizes the VXLAN driver as experimental <3.9. | -+----------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ -| iproute2 | matches kernel version | | -+----------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ + • Multicast + • EVPN using BGP -Table: Hypervisor Requirement for VXLAN +While Multicast is the easiest to set up VXLAN isolation, EVPN offers much more control, scalability, and flexibility. Therefore, it is chosen for most VXLAN network deployments. +.. warning:: + Deploying VXLAN, especially with EVPN, requires extensive networking knowledge, which isn't covered by this documentation or CloudStack in general. + Make sure to familiarize yourself with VXLAN, BGP and EVPN before attempting to deploy this network technology. -Linux Distributions that meet the requirements ----------------------------------------------- - -The following table lists distributions which meet requirements. - -.. 
cssclass:: table-striped table-bordered table-hover - -+----------------+-------------------+-------------------------------------------+----------------------------------------------------------------+ -| Distribution | Release Version | Kernel Version (Date confirmed) | Note | -+================+===================+===========================================+================================================================+ -| Ubuntu | 13.04 | 3.8.0 (2013/07/23) | | -+----------------+-------------------+-------------------------------------------+----------------------------------------------------------------+ -| Fedora | >= 17 | 3.9.10 (2013/07/23) | Latest kernel packages are available in "update" repository. | -+----------------+-------------------+-------------------------------------------+----------------------------------------------------------------+ -| CentOS | >= 6.5 | 2.6.32-431.3.1.el6.x86\_64 (2014/01/21) | | -+----------------+-------------------+-------------------------------------------+----------------------------------------------------------------+ - -Table: List of Linux distributions which meet the hypervisor -requirements - +System Requirements / Networking for VXLAN +------------------------------------------ -Check the capability of your system -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To check the capability of your system, execute the following commands. +The following table lists the requirements for using VXLAN in your deployment: -:: - $ sudo modprobe vxlan && echo $? - # Confirm the output is "0". - # If it's non-0 value or error message, your kernel doesn't have VXLAN kernel module. 
++---------------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ +| Item | Requirement | Note | ++=====================+===============================================+================================================================================================================+ +| Hypervisor | KVM | Only the BridgeVifDriver (default) is supported | ++---------------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ +| Network Card (NIC) | VXLAN offloading | A NIC with VXLAN-offloading support is recommended. For example Mellanox ConnectX-5 or Intel X710 | ++---------------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ +| IP Protocol | IPv4 or IPv6 | CloudStack is agnostic to the IP-protocol being used as underlay. Both IPv4 and IPv6 are supported | ++---------------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ +| MTU | >=1550 | VXLAN has an overhead of 50 bytes, therefore 1550 is the minimum. See the notes below | ++---------------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ +| BGP Routing | FRRouting (>=10) | BGP Routing Daemon only required for EVPN. Version >=10 is recommended | ++---------------------+-----------------------------------------------+----------------------------------------------------------------------------------------------------------------+ - $ ip link add type vxlan help - # Confirm the output is usage of the command and that it's for VXLAN. 
- # If it's not, your iproute2 utility doesn't support VXLAN. - -Important note on MTU size -~~~~~~~~~~~~~~~~~~~~~~~~~~ +MTU size +~~~~~~~~ When new VXLAN interfaces are created, kernel will obtain current MTU size of the physical interface (ethX or the bridge) and then create VXLAN interface/bridge that are exactly 50 bytes smaller than the MTU on physical interface/bridge. @@ -87,101 +69,27 @@ have MTU of 1500 bytes, meaning that your physical interface/bridge must have MT In order to configure "jumbo frames" you can i.e. make physical interface/bridge with 9000 bytes MTU, then all the VXLAN interfaces will be created with MTU of 8950 bytes, and then MTU size inside Instance can be set to 8950 bytes. -Important note on max number of multicast groups (and thus VXLAN interfaces) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Default value of "net.ipv4.igmp_max_memberships" (cat /proc/sys/net/ipv4/igmp_max_memberships) is "20", which means that host can be joined to max 20 multicast groups (attach max 20 multicast IPs on the host). -Since all VXLAN (VTEP) interfaces provisioned on host are multicast-based (belong to certain multicast group, and thus has it's own multicast IP that is used as VTEP), this means that you can not provision more than 20 (working) VXLAN interfaces per host. -On Linux kernel 3.x you actually can provision more than 20, but ARP request will silently fail and cause client's networking problems -On Linux kernel 4.x you can NOT provision (start) more than 20 VXLAN interfaces and error message "No buffer space available" can be observed in Cloudstack Agent logs after provisioning required bridges and VXLAN interfaces. -Increase needed parameter to sane value (i.e. 100 or 200) as required. -If you need to operate more than 20 Instances from different client's Network, this change above is required. 
- -Advanced: Build kernel and iproute2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Even if your system doesn't support VXLAN, you can compile the kernel -and iproute2 by yourself. The following procedure is an example for -CentOS 6.4. - - -Build kernel -^^^^^^^^^^^^ - -:: - - $ sudo yum groupinstall "Development Tools" - $ sudo yum install ncurses-devel hmaccalc zlib-devel binutils-devel elfutils-libelf-devel bc - - $ KERNEL_VERSION=3.10.4 - # Declare the kernel version you want to build. - - $ wget https://www.kernel.org/pub/linux/kernel/v3.x/linux-${KERNEL_VERSION}.tar.xz - $ tar xvf linux-${KERNEL_VERSION}.tar.xz - $ cd linux-${KERNEL_VERSION} - $ cp /boot/config-`uname -r` .config - $ make oldconfig - # You may keep hitting enter and choose the default. - - $ make menuconfig - # Dig into "Device Drivers" -> "Network device support", - # then select "Virtual eXtensible Local Area Network (VXLAN)" and hit space. - # Make sure it indicates "" (build as module), then Save and Exit. - - # You may also want to check "IPv4 NAT" and its child nodes in "IP: Netfilter Configuration" - # and "IPv6 NAT" and its child nodes in "IPv6: Netfilter Configuration". - # In 3.10.4, you can find the options in - # "Networking support" -> "Networking options" - # -> "Network packet filtering framework (Netfilter)". - - $ make # -j N - # You may use -j N option to make the build process parallel and faster, - # generally N = 1 + (cores your machine have). - - $ sudo make modules_install - $ sudo make install - # You would get an error like "ERROR: modinfo: could not find module XXXX" here. - # This happens mainly due to config structure changes between kernel versions. - # You can ignore this error, until you find you need the kernel module. - # If you feel uneasy, you can go back to make menuconfig, - # find module XXXX by using '/' key, enable the module, build and install the kernel again. 
- - $ sudo vi /etc/grub.conf - # Make sure the new kernel isn't set as the default and the timeout is long enough, - # so you can select the new kernel during boot process. - # It's not a good idea to set the new kernel as the default until you confirm the kernel works fine. - - $ sudo reboot - # Select the new kernel during the boot process. - +In general it is recommend to use an MTU of at least 9000 bytes or larger. Most VXLAN capable network cards and switch support an MTU of up to 9216. -Build iproute2 -^^^^^^^^^^^^^^ +Using an MTU of 9216 bytes allows for using Jumbo Frames (9000) within guest networks. -:: - - $ sudo yum install db4-devel - $ git clone git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git - $ cd iproute2 - $ git tag - # Find the version that matches the kernel. - # If you built kernel 3.10.4 as above, it would be v3.10.0. +VXLAN using Multicast +--------------------- +The default mode for using VXLAN is Multicast. The required configuration is described below. - $ git checkout v3.10.0 - $ ./configure - $ make # -j N - $ sudo make install +Important note on max number of multicast groups +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Default value of "net.ipv4.igmp_max_memberships" (cat /proc/sys/net/ipv4/igmp_max_memberships) is "20", which means that host can be joined to max 20 multicast groups (attach max 20 multicast IPs on the host). -.. note:: Please use rebuild kernel and tools at your own risk. +Since all VXLAN (VTEP) interfaces provisioned on host are multicast-based (belong to certain multicast group, and thus has it is own multicast IP that is used as VTEP), this means that you can not provision more than 20 (working) VXLAN interfaces per host. +Under Linux you can NOT by default provision (start) more than 20 VXLAN interfaces and the error message "No buffer space available" will appear in the Cloudstack Agent logs after provisioning the required bridges and VXLAN interfaces. 
-Configure CloudStack to use VXLAN Plugin -------------------------------------- +Increase the needed parameter to an appropriate value (i.e. 100 or 200) as required. -Configure hypervisor -~~~~~~~~~~~~~~~~~~~~ +If you need to operate more than 20 Instances from different client networks, the change above is required. Configure hypervisor: KVM ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -195,7 +103,7 @@ Create bridge interface with IPv4 address ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This plugin requires an IPv4 address on the KVM host to terminate and -originate VXLAN traffic. The address should be assinged to a physical +originate VXLAN traffic. The address should be assigned to a physical interface or a bridge interface bound to a physical interface. Both a private address or a public address are fine for the purpose. It is not required to be in the same subnet for all hypervisors in a zone, but @@ -267,7 +175,7 @@ When you configured ``cloudbr1`` as below, address 192.168.42.11 netmask 255.255.255.240 gateway 192.168.42.1 - dns-nameservers 8.8.8.8 8.8.4.4 + dns-nameservers 9.9.9.9 dns-domain lab.example.org # Public network @@ -299,7 +207,7 @@ you would change the configuration similar to below. address 192.168.42.11 netmask 255.255.255.240 gateway 192.168.42.1 - dns-nameservers 8.8.8.8 8.8.4.4 + dns-nameservers 9.9.9.9 dns-domain lab.example.org # Public network @@ -313,7 +221,7 @@ you would change the configuration similar to below. # Private network auto cloudbr1 iface cloudbr1 inet static - addres 192.0.2.X + address 192.0.2.X netmask 255.255.255.0 bridge_ports eth0.300 bridge_fd 5 @@ -321,77 +229,142 @@ you would change the configuration similar to below. bridge_maxwait 1 -Configure iptables to pass XVLAN packets +Configure iptables to pass VXLAN packets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Since VXLAN uses UDP packet to forward encapsulated the L2 frames, UDP/8472 port must be opened. 
-Configure in RHEL or CentOS -''''''''''''''''''''''''''' - -RHEL and CentOS use iptables for firewalling the system, you can open -extra ports by executing the following iptable commands: - -:: - - $ sudo iptables -I INPUT -p udp -m udp --dport 8472 -j ACCEPT - - -These iptable settings are not persistent accross reboots, we have to -save them first. - -:: - - $ sudo iptables-save > /etc/sysconfig/iptables - - -With this configuration you should be able to restart the Network, -although a reboot is recommended to see if everything works properly. +Make sure that your firewall (firewalld, ufw, ...) allows UDP packets on port 8472, as an example: :: - $ sudo service network restart - $ sudo reboot + $ sudo firewall-cmd --zone=public --permanent --add-port=8472/udp + $ sudo ufw allow proto udp from any to any port 8472 -.. warning:: - Make sure you have an alternative way like IPMI or ILO to reach the machine - in case you made a configuration error and the Network stops functioning! +VXLAN using EVPN +--------------------- +Using VXLAN with BGP+EVPN as underlay is more complex to set up, but does allow for more scaling and provides much more flexibility. -Configure in Ubuntu -''''''''''''''''''' +This documentation cannot cover all elements of deploying BGP+EVPN in your environment. -The default firewall under Ubuntu is UFW (Uncomplicated FireWall), which -is a Python wrapper around iptables. +It is recommended to read `this blogpost `_ before you continue. -To open the required ports, execute the following commands: +The main items for using EVPN: -:: +- BGP Routing Daemon on the hypervisor +- No LACP/Bonding will be used +- The modified script (modifyvxlan-evpn.sh) is required and this might require tailoring to your situation +- BGP+EVPN capable and enabled network environment - $ sudo ufw allow proto udp from any to any port 8472 +EVPN Bash script +~~~~~~~~~~~~~~~~ +The default 'modifyvxlan.sh' script installed by CloudStack uses Multicast for VXLAN. -..
note:: - By default UFW is not enabled on Ubuntu. Executing these commands with the - firewall disabled does not enable the firewall. +A different version of this script is available which will use EVPN instead of Multicast and ships with CloudStack by default. -With this configuration you should be able to restart the Network, -although a reboot is recommended to see if everything works properly. +In order to use this script create a symlink on **each** KVM hypervisor :: - - $ sudo service networking restart - $ sudo reboot - -.. warning:: - Make sure you have an alternative way like IPMI or ILO to reach the machine - in case you made a configuration error and the Network stops functioning! - + $ cd /usr/share + $ ln -s cloudstack-common/scripts/vm/network/vnet/modifyvxlan-evpn.sh modifyvxlan.sh + +This script is also available in the CloudStack `GIT repository `_. + +View the contents of the script to understand its inner workings, some key items: + +- VXLAN (vtep) devices are created using 'nolearning', disabling the use of multicast +- UDP port 4789 (RFC 7348) +- IPv4 is used as underlay +- It assumes an IPv4 (/32) address is configured on the loopback interface and will be the VTEP source + +BGP routing daemon +~~~~~~~~~~~~~~~~~~~ +Using `FRRouting `_ as routing daemon is recommended, but not required. In general FRR is a BGP routing daemon with extensive EVPN support. + +Refer to the FRRouting documentation on how to install the proper packages and get started with FRR. + +A minimal configuration for FRR could look like this: + +.. code-block:: bash + + frr defaults traditional + hostname hypervisor01 + log syslog informational + no ipv6 forwarding + service integrated-vtysh-config + ! + interface ens2f0np0 + no ipv6 nd suppress-ra + ! + interface ens2f1np1 + no ipv6 nd suppress-ra + ! + interface lo + ip address 10.255.192.12/32 + ipv6 address 2001:db8:100::1/128 + ! 
+   router bgp 4200800212
+    bgp router-id 10.255.192.12
+    no bgp ebgp-requires-policy
+    no bgp default ipv4-unicast
+    no bgp network import-check
+    neighbor uplinks peer-group
+    neighbor uplinks remote-as external
+    neighbor uplinks ebgp-multihop 255
+    neighbor ens2f0np0 interface peer-group uplinks
+    neighbor ens2f1np1 interface peer-group uplinks
+    !
+    address-family ipv4 unicast
+     network 10.255.192.12/32
+     neighbor uplinks activate
+     neighbor uplinks next-hop-self
+     neighbor uplinks soft-reconfiguration inbound
+     neighbor uplinks route-map upstream-v4-in in
+     neighbor uplinks route-map upstream-v4-out out
+    exit-address-family
+    !
+    address-family ipv6 unicast
+     network 2001:db8:100::1/128
+     neighbor uplinks activate
+     neighbor uplinks soft-reconfiguration inbound
+     neighbor uplinks route-map upstream-v6-in in
+     neighbor uplinks route-map upstream-v6-out out
+    exit-address-family
+    !
+    address-family l2vpn evpn
+     neighbor uplinks activate
+     advertise-all-vni
+     advertise-svi-ip
+    exit-address-family
+
+
+This configuration will:
+
+- Establish two BGP sessions using BGP Unnumbered over the two uplinks (ens2f0np0 and ens2f1np1)
+- These BGP sessions are usually established with two Top-of-Rack (ToR) switches/routers which are BGP+EVPN capable
+- Enable the families ipv4, ipv6 and evpn
+- Announce the IPv4 (10.255.192.12/32) and IPv6 (2001:db8:100::1/128) loopback addresses
+- Advertise all VXLAN networks (VNI) detected locally on the hypervisor (vxlan network devices)
+- Use ASN 4200800212 for this hypervisor (each node has its own unique ASN)
+
+BGP and EVPN in the upstream network
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This documentation does not cover configuring BGP and EVPN in the upstream network.
+
+This will differ per network and is therefore difficult to capture in this documentation.
A couple of key items though:
+
+- Each hypervisor will establish eBGP session(s) with the Top-of-Rack router(s) in its rack
+- These Top-of-Rack devices will connect to (a) Spine router(s)
+- On the Spine router(s) the VNIs will terminate and they will act as IPv4/IPv6 gateways
+
+The exact BGP and EVPN configuration will differ per networking vendor and thus differs per deployment.

 Setup zone using VXLAN
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------

 In almost all parts of zone setup, you can just follow the advanced zone
 setup instruction in "CloudStack Installation Guide" to use this plugin. It
@@ -401,7 +374,7 @@ Network to use VXLAN as the isolation method for Guest Network.


 Configure the physical Network
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. figure:: /_static/images/vxlan-physicalnetwork.png

@@ -416,7 +389,7 @@ should have an IPv4 address. See ? for details.


 Configure the guest traffic
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 .. figure:: /_static/images/vxlan-vniconfig.png

diff --git a/source/quickinstallationguide/qig.rst b/source/quickinstallationguide/qig.rst
index 142a5e2723..da74fe90cf 100644
--- a/source/quickinstallationguide/qig.rst
+++ b/source/quickinstallationguide/qig.rst
@@ -40,7 +40,8 @@ get you up and running with CloudStack with a minimum amount of trouble.
    The requirement is that you enable "Enable Nested VT-x/AMD-V" as the Extended Feature
    on the System page of the Settings of the Instance. You will want to create
    an Instance of "Red Hat (64-bit)" type and 40+GB disk space.
    You will need to have 1 NIC in your Instance, bridged to the NIC of your laptop/desktop
-   (wifi or wired NIC, doesn't matter), and optimally to set Adapter Type="Paravirtualized Network (virtio-net)"
+   (bridging to a wireless adapter does frequently cause connectivity issues, so avoid it, and instead bridge to the wired adapter),
+   and optimally to set Adapter Type="Paravirtualized Network (virtio-net)"
    for somewhat better network performance (Settings of Instance, Network section, Adapter1, expand "Advanced").
    Make sure the NIC on your Instance is configured as promiscuous (in VirtualBox, choose
    "Allow All" or just "Allow Instances" as the Promiscuous Mode), so that it can pass traffic from
@@ -48,13 +49,11 @@ get you up and running with CloudStack with a minimum amount of trouble.
    enough CPU cores (3+) for demo purposes.


-High level overview of the process
+High-level overview of the process
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-This guide will focus on building a CloudStack cloud using KVM on CentOS
-7.9 with NFS storage and layer-2 isolation using VLANs,
-(flat home network can be used for this as well) and on a single piece of
-hardware (server/VM)
+This guide will focus on building a CloudStack cloud using KVM on an EL8 distro with NFS storage and layer-2 isolation using VLANs,
+(flat home network can be used for this as well) and on a single piece of hardware (server/VM)

 KVM, or Kernel-based Virtual Machine is a virtualization technology for the
 Linux kernel. KVM supports native virtualization atop processors with hardware
@@ -68,8 +67,11 @@ To complete this guide you'll need the following items:

 #. At least one computer which supports and has enabled hardware virtualization.

-#. An `CentOS 7.9 minimal x86_64 install ISO, on bootable media
-   `_
+#. A minimal EL8 distro like
+
+   #. Oracle Linux 8 - https://yum.oracle.com/oracle-linux-isos.html
+   #. Rocky Linux 8 - https://rockylinux.org/download
+   #. AlmaLinux OS 8 - https://almalinux.org/get-almalinux/

 #.
A /24 network with the gateway being at (e.g.) xxx.xxx.xxx.1, no DHCP is needed on this network and none of the computers running CloudStack will have a @@ -86,8 +88,7 @@ CloudStack. We will go over the steps to prepare now. Operating System ~~~~~~~~~~~~~~~~ -Using the CentOS 7.9.2009 minimal x86_64 install ISO, you'll need to install -CentOS 7 on your hardware. The defaults will generally be acceptable for this +Install preferred EL8 distro on your hardware. The defaults will generally be acceptable for this installation - but make sure to configure IP address/parameters so that you can later install needed packages from the internet. Later, we will change the Network configuration as needed. @@ -97,7 +98,7 @@ server - through SSH. It is always wise to update the system before starting: .. parsed-literal:: - # yum -y upgrade + # dnf -y upgrade .. _conf-network: @@ -105,14 +106,13 @@ It is always wise to update the system before starting: Configuring the Network ^^^^^^^^^^^^^^^^^^^^^^^ -Before going any further, make sure that "bridge-utils" and "net-tools" are installed and available: - -.. parsed-literal:: - # yum install bridge-utils net-tools -y +Starting with EL8, we must use the Network Manager to configure all network interfaces +(instead of using the network-scripts we have used for so many years). -Connecting via the console or SSH, you should login as root. We will start by creating -the bridge that Cloudstack will use for networking. Create and open -/etc/sysconfig/network-scripts/ifcfg-cloudbr0 and add the following settings: +We will start by creating the bridge that Cloudstack will use for networking. +To avoid remote (ssh) disconnections, you should be logging to the server locally, +via console/physical screen (or save the commands below as a script and then run it +via remote ssh session) .. note:: IP Addressing - Throughout this document we are assuming that you will have @@ -126,56 +126,24 @@ the bridge that Cloudstack will use for networking. 
Create and open :: - DEVICE=cloudbr0 - TYPE=Bridge - ONBOOT=yes - BOOTPROTO=static - IPV6INIT=no - IPV6_AUTOCONF=no - DELAY=5 - IPADDR=172.16.10.2 #(or e.g. 192.168.1.2) - GATEWAY=172.16.10.1 #(or e.g. 192.168.1.1 - this would be your physical/home router) - NETMASK=255.255.255.0 - DNS1=8.8.8.8 - DNS2=8.8.4.4 - STP=yes - USERCTL=no - NM_CONTROLLED=no - -Save the configuration and exit. We will then edit the NIC so that it -makes use of this bridge. - -Open the configuration file of your NIC (e.g. /etc/sysconfig/network-scripts/ifcfg-eth0) -and edit it as follows: + #create an "empty” bridge, add eth0 to the bridge, set static IP and reactivate the whole configuration + nmcli connection add type bridge con-name cloudbr0 ifname cloudbr0 + nmcli connection modify eth0 master cloudbr0 + nmcli connection up eth0 + nmcli connection modify cloudbr0 ipv4.addresses '172.16.10.2/24' ipv4.gateway '172.16.10.1' ipv4.dns '8.8.8.8' ipv4.method manual && nmcli connection up cloudbr0 .. note:: Interface name (eth0) used as example only. Replace eth0 with your default ethernet interface name. -.. parsed-literal:: - TYPE=Ethernet - BOOTPROTO=none - DEFROUTE=yes - NAME=eth0 - DEVICE=eth0 - ONBOOT=yes - BRIDGE=cloudbr0 - -.. note:: - If your physical nic (eth0 in the case of our example) has already been - setup before following this guide, make sure that there is no duplication - between IP configuration of /etc/config/network-scripts/ifcfg-cloudbr0 and - /etc/sysconfig/network-scripts/ifcfg-eth0 which will cause a failure that - would prevent the network from starting. Basically, IP configuration - of eth0 should be moved to the bridge and eth0 will be added to the bridge. +Optionally, we can install the net-tools: +.. parsed-literal:: + # dnf install net-tools -y -Now that we have the configuration files properly set up, we need to run a few -commands to start up the network: +Now that we have the configuration files properly set up, let's reboot: .. 
parsed-literal:: - # systemctl disable NetworkManager; systemctl stop NetworkManager - # systemctl enable network # reboot .. _conf-hostname: @@ -197,23 +165,19 @@ At this point it will likely return: localhost -To rectify this situation - we'll set the hostname by editing the /etc/hosts -file so that it follows a similar format to this example (remember to replace -the IP with your IP which might be e.g. 192.168.1.2): +To rectify this situation - we'll set the hostname so that it follows a similar format to this example: .. parsed-literal:: - 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 - ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 - 172.16.10.2 srvr1.cloud.priv + hostnamectl set-hostname server.local --static -After you've modified that file, go ahead and restart the network using: +After you've modified that file, go ahead and reboot: .. parsed-literal:: - # systemctl restart network + # reboot -Now recheck with the +Now recheck the hostname with the .. parsed-literal:: @@ -227,9 +191,10 @@ and ensure that it returns a FQDN response SELinux ^^^^^^^ -At the moment, for CloudStack to work properly SELinux must be set to -permissive or disabled. We want to both configure this for future boots and modify it in -the current running system. +In an ideal environment, selinux should be set to enforcing and the necessary +selinux policies are created to allow the services to run. For this guide, +we will set selinux to permissive mode. This will allow us to install and +configure the services without having to worry about selinux policies. To configure SELinux to be permissive in the running system we need to run the following command: @@ -257,8 +222,8 @@ To ensure that it remains in that state we need to configure the file .. _conf-ntp: -NTP -^^^ +NTP (Chrony) +^^^^^^^^^^^^ NTP configuration is a necessity for keeping all of the clocks in your cloud servers in sync. However, NTP is not installed by default. 
So we'll install @@ -266,15 +231,15 @@ and and configure NTP at this stage. Installation is accomplished as follows: .. parsed-literal:: - # yum -y install ntp + # dnf -y install chrony The actual default configuration is fine for our purposes, so we merely need to enable it and set it to start on boot as follows: .. parsed-literal:: - # systemctl enable ntpd - # systemctl start ntpd + # systemctl enable chronyd + # systemctl start chronyd .. _qigconf-pkg-repo: @@ -313,7 +278,7 @@ start out by installing nfs-utils. .. parsed-literal:: - # yum -y install nfs-utils + # dnf -y install nfs-utils We now need to configure NFS to serve up two different shares. This is handled in the /etc/exports file. You should ensure that it has the following content: @@ -332,31 +297,17 @@ appropriately on them with the following commands: # mkdir -p /export/primary # mkdir /export/secondary -CentOS 7.x releases use NFSv4 by default. NFSv4 requires that domain setting -matches on all clients. In our case, the domain is cloud.priv, so ensure that -the domain setting in /etc/idmapd.conf is uncommented and set as follows: +NFSv4 requires that domain setting matches on all clients. In our case, the +domain is "local", so ensure that the domain setting in /etc/idmapd.conf is uncommented and set as follows: .. parsed-literal:: - Domain = cloud.priv - -Now you'll need to add the configuration values at the bottom in the file -/etc/sysconfig/nfs (or merely uncomment and set them) - -.. parsed-literal:: - - LOCKD_TCPPORT=32803 - LOCKD_UDPPORT=32769 - MOUNTD_PORT=892 - RQUOTAD_PORT=875 - STATD_PORT=662 - STATD_OUTGOING_PORT=2020 + Domain = local For simplicity, we need to disable the firewall, so that it will not block connections. .. note:: - Configuration of the firewall on CentOS7 is beyond the purview of this - guide. + Configuration of the firewall is beyond the purview of this guide. 
To do so, simply use the following two commands: @@ -371,9 +322,9 @@ it on the host by executing the following commands: .. parsed-literal:: # systemctl enable rpcbind - # systemctl enable nfs + # systemctl enable nfs-server # systemctl start rpcbind - # systemctl start nfs + # systemctl start nfs-server Management Server Installation @@ -388,23 +339,13 @@ Database Installation and Configuration We'll start with installing MySQL and configuring some options to ensure it runs well with CloudStack. -First, as CentOS 7 no longer provides the MySQL binaries, we need to add a MySQL community repository, -that will provide MySQL Server (and the Python MySQL connector later) : - .. parsed-literal:: - # yum -y install wget - # wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm - # rpm -ivh mysql-community-release-el7-5.noarch.rpm -Install by running the following command: + # dnf -y install mysql-server -.. parsed-literal:: - - # yum -y install mysql-server - -This should install MySQL 5.x, as of the time of writing this guide. +This should install MySQL 8.x, as of the time of writing this guide. With MySQL now installed we need to make a few configuration changes to -/etc/my.cnf. Specifically we need to add the following options to the [mysqld] +/etc/my.cnf.d/mysql-server.cnf. Specifically, we need to add the following options to the [mysqld] section: .. parsed-literal:: @@ -415,18 +356,6 @@ section: log-bin=mysql-bin binlog-format = 'ROW' -.. note:: - - For Ubuntu 16.04 and later, make sure you specify a ``server-id`` in your ``.cnf`` file for binary logging. Set the ``server-id`` according to your database setup. - -.. 
parsed-literal:: - - server-id=source-01 - innodb_rollback_on_timeout=1 - innodb_lock_wait_timeout=600 - max_connections=350 - log-bin=mysql-bin - binlog-format = 'ROW' Now that MySQL is properly configured we can start it and configure it to start on boot as follows: @@ -436,19 +365,6 @@ start on boot as follows: # systemctl enable mysqld # systemctl start mysqld - -MySQL Connector Installation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Install Python MySQL connector from the MySQL community repository (which we've added previously): - -.. parsed-literal:: - - # yum -y install mysql-connector-python - -Please note that the previously required ``mysql-connector-java`` library is now bundled with CloudStack -Management server and is no longer required to be installed separately. - Installation ~~~~~~~~~~~~ @@ -457,17 +373,17 @@ following command: .. parsed-literal:: - # yum -y install cloudstack-management + # dnf -y install cloudstack-management -CloudStack |version| requires Java 11 JRE. Installing the management server -will automatically install Java 11, but it's good to explicitly confirm that Java 11 +CloudStack |version| requires Java 17 JRE. Installing the management server +will automatically install Java 17, but it's good to explicitly confirm that Java 17 is the selected/active one (in case you had a previous Java version already installed): .. parsed-literal:: $ alternatives --config java -Make sure that Java 11 is selected. +Make sure that Java 17 is selected. With the application itself installed we can now setup the database, we'll do that with the following command and options: @@ -541,12 +457,11 @@ Installation ~~~~~~~~~~~~ Installation of the KVM agent is trivial with just a single command, but -afterwards we'll need to configure a few things. We need to install the EPEL repository also. +afterwards we'll need to configure a few things. .. 
parsed-literal::

-   # yum -y install epel-release
-   # yum -y install cloudstack-agent
+   # dnf -y install cloudstack-agent


 KVM Configuration
@@ -575,11 +490,11 @@ and should already be installed.

 #. Even though we are using a single host, the following steps are recommended to
    get faimilar with the general requirements.

-   In order to have live migration working libvirt has to listen for unsecured
+   In order to have live migration working libvirt has to listen for insecure
    TCP connections. We also need to turn off libvirts attempt to use Multicast
    DNS advertising. Both of these settings are in /etc/libvirt/libvirtd.conf

-   Set the following paramaters:
+   Set the following parameters:

    ::

@@ -598,6 +513,12 @@ and should already be installed.

       #LIBVIRTD_ARGS="--listen"

+#. As of EL8, we'll have to do the socket masking:
+
+   .. parsed-literal::
+
+      # systemctl mask libvirtd.socket libvirtd-ro.socket libvirtd-admin.socket libvirtd-tls.socket libvirtd-tcp.socket
+
 #. Restart libvirt

    .. parsed-literal::

@@ -813,6 +734,7 @@ That's it, you are done with installation of your Apache CloudStack demo cloud.
 To check the health of your CloudStack installation, go to Infrastructure --> System VMs
 and refresh the UI from time to time - you should see “S-1-VM” and “V-2-VM” system VMs
 (SSVM and CPVM) in State=Running and Agent State=Up
+
 After that you can go to Images --> Templates, click on the built-in Template named
 "CentOS 5.5(64-bit) no GUI (KVM)", then click on "Zones" tab - and observe how the Status
 is moving from a few percents downloaded up to fully downloaded, after which the Status
 will show as "Download Complete" and "Ready" column will say "Yes".
diff --git a/source/releasenotes/about.rst b/source/releasenotes/about.rst index eb5d5f66b2..943777b58e 100644 --- a/source/releasenotes/about.rst +++ b/source/releasenotes/about.rst @@ -17,128 +17,44 @@ What's New in |release| ======================= -Apache CloudStack |release| is a 4.19 LTS minor release with over 300 fixes -and improvements since the 4.19.0.0 release. Some of the highlights include: - -• Improvements for VMware migration to KVM -• Support to manage/unmanage DATA volume of a primary storage on KVM -• Support for NFS mount options for a primary storage on KVM -• Support to change storage pool scope from Cluster to Zone and vice versa -• Support for RHEL/OL/Rocky/Alma Linux in the same cluster -• Import from remote KVM enhancements -• Storage plugins, PowerFlex, StorPool, Linstor related fixes and improvements -• Some CKS, Veeam (B&R) related fixes and improvements -• Several UI fixes and improvements - -The full list of fixes and improvements can be found in the project release notes at -https://docs.cloudstack.apache.org/en/4.19.1.0/releasenotes/changes.html - -What's in since 4.19.0.0 -======================== - -Apache CloudStack 4.19.0.0 is the initial 4.19 LTS release with 300+ new -features, improvements and bug fixes since 4.18, including 26 major +Apache CloudStack 4.20.0.0 is the initial 4.20 LTS release with 190+ new +features, improvements and bug fixes since 4.19, including 15 major new features. 
Some of the highlights include: -• CloudStack Object Storage Feature -• VMware to KVM Migration -• KVM Import -• CloudStack DRS -• OAuth2 Authentication -• VNF Appliances Support -• CloudStack DRS -• CloudStack Snapshot Copy -• Scheduled Instance Lifecycle Operations -• Guest OS Management -• Pure Flash Array and HPE-Primera Support -• User-specified source NAT -• Storage Browser -• Safe CloudStack Shutdown -• New CloudStack Dashboard -• Domain migration -• Flexible tags for hosts and storage pools -• Support for Userdata in Autoscale Groups -• KVM Host HA for StorPool storage -• Dynamic secondary storage selection -• Domain VPCs -• Global ACL for VPCs - -The full list of new features can be found in the project release notes at -https://docs.cloudstack.apache.org/en/4.19.0.0/releasenotes/changes.html - -.. _guestosids - -Possible Issue with volume snapshot revert with KVM -=================================================== - -Between versions 4.17.x, 4.18.0 and 4.18.1, KVM volume snapshot backups were -not full snapshots and they rely on the primary storage as a backing store. -To prevent any loss of data, care must be taken during revert operation and -it must be ensured that the source primary storage snapshot file is present -if the snapshot is created with any of these CloudStack versions. 
- -Users will have a backing store in their volume snapshots in the following cases: +• Webhooks +• Dynamic and Static Routing +• Ceph RGW Object Store Support +• NSX integration +• Shared Filesystems +• Multi-arch Zones +• Simple NAS backup plugin for KVM +• Usage UI +• API documentation in UI -- the snapshots are from a ROOT volume created from template; -Users will not have a backing store in their volume snapshots in the following cases: - -- the snapshots are from ROOT volumes created with ISO; -- the snapshots are from DATADISK volumes; - -Following there are two queries to help users identify snapshots with a backing store: - -Identify snapshots that were not removed yet and were created from a volume that was created from a template: - -.. parsed-literal:: - SELECT s.uuid AS "Snapshot ID", - s.name AS "Snapshot Name", - s.created AS "Snapshot creation datetime", - img_s.uuid AS "Sec Storage ID", - img_s.name AS "Sec Storage Name", - ssr.install_path AS "Snapshot path on Sec Storage", - v.uuid AS "Volume ID", - v.name AS "Volume Name" - FROM cloud.snapshots s - INNER JOIN cloud.volumes v ON (v.id = s.volume_id) - INNER JOIN cloud.snapshot_store_ref ssr ON (ssr.snapshot_id = s.id - AND ssr.store_role = 'Image') - INNER JOIN cloud.image_store img_s ON (img_s.id = ssr.store_id) - WHERE s.removed IS NULL - AND v.template_id IS NOT NULL; - -With that, one can use qemu-img info in the snapshot file to check if they have a backing store. - -For those snapshots that have a backing store, one can use the following query to check which template is it and in which storage pool it is: - -.. 
parsed-literal:: - SELECT vt.uuid AS "Template ID", - vt.name AS "Template Name", - tsr.install_path AS "Template file on Pri Storage", - sp.uuid AS "Pri Storage ID", - sp.name AS "Pri Storage Name", - sp.`path` AS "Pri Storage Path", - sp.pool_type as "Pri Storage type" - FROM cloud.template_spool_ref tsr - INNER JOIN cloud.storage_pool sp ON (sp.id = tsr.pool_id AND sp.removed IS NULL) - INNER JOIN cloud.vm_template vt ON (vt.id = tsr.template_id) - WHERE tsr.install_path = "